diff --git "a/2514.jsonl" "b/2514.jsonl" new file mode 100644--- /dev/null +++ "b/2514.jsonl" @@ -0,0 +1,1070 @@ +{"seq_id":"17049394250","text":"\nimport pandas as pd\nimport csv\n\n\ndef Split_data():\n cmt = []\n type_cmt = []\n # with open('./data/lazada-dongho-raw.csv', 'r') as csvFile:\n # reader = csv.reader(csvFile)\n # for row in reader:\n # print(row['Comment'])\n with open(('./comment_rate.json'),'r', encoding=\"utf8\") as f:\n for line in f:\n line = line.rstrip('\\n')\n split_cmt = line.split('.')\n for i in range(len(split_cmt)):\n if(len(split_cmt[i]) > 5 ) :\n cmt.append(split_cmt[i])\n print(len(cmt))\n for i in range(len(cmt)):\n type_value = 0\n type_cmt.append(type_value)\n try:\n datalist = pd.read_csv('./comment_rate.csv')\n except:\n datalist = pd.DataFrame(columns=['Comment', 'Type'])\n # add data to data list\n new_datalist = pd.DataFrame(columns=['Comment', 'Type'])\n new_datalist['Comment'] = cmt\n new_datalist['Type'] = type_cmt\n\n datalist = datalist.append(new_datalist, ignore_index=True, sort=False)\n # save file\n datalist[['Comment', 'Type']].to_csv('./comment_rate.csv', encoding='utf-8-sig')\n\nif __name__ == '__main__':\n Split_data()\n","repo_name":"anhtuan96a8/FinalProjectVersion2","sub_path":"Sentiment-Analysis/database-processing/Split.py","file_name":"Split.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6644172976","text":"import os\nfrom pathlib import Path\nfrom typing import Union\n\nimport pygrib\nfrom datetime import datetime, timedelta\n\nfrom scipy.interpolate import interpolate\n\nfrom gfs_archive_0_25.utils import get_nearest_coords\nfrom gfs_common.common import get_param_key\nfrom gfs_oper.common import Config, InitMeta\nfrom gfs_oper.fetch_oper import get_init_meta, GribResource\nfrom util.coords import Coords\nimport pandas as pd\n\nfrom wind_forecast.util.config import process_config\n\n\ndef process_grib(grib: GribResource, grib_dir: str, target_coords: Coords, output_path: str):\n output_file = os.path.join(os.path.dirname(grib.get_output_location(output_path)),\n f\"{str(target_coords.nlat)}-{str(target_coords.wlon)}-{str(grib.offset)}.csv\")\n if os.path.exists(output_file):\n print(f\"Skipping grib processing, because file at {output_file} already exists\")\n return\n print(f\"Processing grib {grib.get_output_location(grib_dir)}...\")\n\n param_config = process_config(os.path.join(Path(__file__).parent, \"param_config.json\")).params\n data = {}\n\n gr = pygrib.open(grib.get_output_location(grib_dir))\n\n for parameter in param_config:\n try:\n if 'type_of_level' in parameter:\n message = gr.select(shortName=parameter['grib_name'], typeOfLevel=parameter['type_of_level'],\n level=parameter['grib_level'])[0]\n else:\n message = gr.select(shortName=parameter['grib_name'])[0]\n coords = get_nearest_coords(target_coords)\n values = message.data(lat1=coords.slat,\n lat2=coords.nlat,\n lon1=coords.wlon,\n lon2=coords.elon)\n interpolated_function_for_values = interpolate.interp2d([coords.slat, coords.nlat], [coords.wlon, coords.elon], values[0][::-1])\n data[get_param_key(parameter['name'], parameter['level'])] = interpolated_function_for_values(target_coords.nlat, target_coords.wlon).item()\n except:\n print(f\"Data not found in grib file for parameter {parameter['name']}. 
Setting it to 0.0.\")\n data[get_param_key(parameter['name'], parameter['level'])] = 0.0\n gr.close()\n data['date'] = (grib.init_meta.get_date_string_for_offset(grib.offset))\n df = pd.DataFrame([data])\n\n os.makedirs(os.path.dirname(output_file), exist_ok=True)\n df.to_csv(output_file, index=False)\n\n\ndef process_future_gribs(init_meta: InitMeta, config: Config):\n for offset in range(0, config.future_sequence_length):\n process_grib(GribResource(init_meta, offset), config.download_path, config.target_coords, config.processing_output_path)\n\n\ndef process_past_gribs(init_meta, config: Config):\n for init in range(0, (config.past_sequence_length // 6) + 1):\n init_meta = init_meta.get_previous()\n for offset in range(0, 6):\n process_grib(GribResource(init_meta, offset), config.download_path, config.target_coords,\n config.processing_output_path)\n\n\ndef process_all_needed_gribs(init_meta: InitMeta, config: Config) -> None:\n process_future_gribs(init_meta, config)\n process_past_gribs(init_meta, config)\n\n\ndef save_info(init_meta, processing_output_path):\n os.makedirs(os.path.dirname(available_starting_points_path(processing_output_path)), exist_ok=True)\n with open(available_starting_points_path(processing_output_path), 'a') as f:\n f.write(init_meta.get_date_string() + \"\\n\")\n\n\ndef available_starting_points_path(processing_output_path: str) -> str:\n return os.path.join(Path(__file__).parent, processing_output_path, \"available_starting_points.txt\")\n\n\ndef is_forecast_ready(init_meta: InitMeta, processing_output_path: str) -> Union[str, None]:\n records_file = available_starting_points_path(processing_output_path)\n if os.path.exists(records_file):\n with open(records_file, 'r') as f:\n for line in f:\n if init_meta.get_date_string() + \"\\n\" == line:\n return init_meta.get_date_string()\n\n return None\n\n\n\"\"\"\nProcesses the most recent available forecast.\nReturns the string indicating the run of the forecast or None if none available for 5 recent runs\n\"\"\"\ndef process_recent_gfs(config: Config) -> Union[str, None]:\n current_date = datetime.now()\n init_meta = get_init_meta(current_date)\n past_init_meta = get_init_meta(current_date)\n\n latest_ready_forecast = is_forecast_ready(init_meta, config.processing_output_path)\n if latest_ready_forecast is not None:\n return latest_ready_forecast\n\n forecast_available = False\n forecast_ready = False\n tries = 0\n\n while not forecast_available and tries < 5:\n forecast_available = True\n for offset in range(0, config.future_sequence_length):\n if not os.path.exists(GribResource(init_meta, offset).get_output_location(config.download_path)):\n latest_ready_forecast = is_forecast_ready(init_meta, config.processing_output_path)\n if latest_ready_forecast is not None:\n forecast_ready = True\n break\n forecast_available = False\n tries += 1\n # get previous GFS run\n current_date = current_date - timedelta(hours=6)\n init_meta = init_meta.get_previous()\n past_init_meta = init_meta\n break\n if forecast_ready:\n break\n if not forecast_available:\n continue\n\n for init in range(0, (config.past_sequence_length // 6) + 1):\n past_init_meta = past_init_meta.get_previous()\n\n for offset in range(0, 6):\n if not os.path.exists(GribResource(past_init_meta, offset).get_output_location(config.download_path)):\n forecast_available = False\n tries += 1\n # get previous GFS run\n current_date = current_date - timedelta(hours=6)\n init_meta = init_meta.get_previous()\n past_init_meta = init_meta\n break\n if not 
forecast_available:\n break\n\n if forecast_ready:\n return latest_ready_forecast\n\n if forecast_available:\n process_all_needed_gribs(init_meta, config)\n save_info(init_meta, config.processing_output_path)\n return init_meta.get_date_string()\n\n return None\n\n\nif __name__ == \"__main__\":\n process_recent_gfs(Config(24, 1, \"download\", \"processed\", Coords(52.1831174, 52.1831174, 20.9875259, 20.9875259)))","repo_name":"adambelniak/WindForecast","sub_path":"src/gfs_oper/process_oper.py","file_name":"process_oper.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"9326347845","text":"import sys\n\nNumber1 = input(\"Number \\n\")\ntry:\n Number1 = int(Number1)\nexcept:\n print(\"This is not a number\")\n sys.exit(\"NAN\")\n\nNumber2 = input(\"Number \\n\")\n\ntry:\n Number2 = int(Number2)\nexcept:\n print(\"This is not a number\")\n sys.exit(\"NAN\")\n\nResult = Number1 * Number2\n\nprint(Result)\n","repo_name":"HeinzNepe/PythonStuffs","sub_path":"times.py","file_name":"times.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36474282370","text":"from math import sqrt\r\nfrom . import triangle2\r\nfrom . import figure\r\n\r\n\r\nclass Rectangle(figure.Figure):\r\n coords = None\r\n a, b, c, d, e = 0, 0, 0, 0, 0\r\n s, r, R = 0, 0, 0\r\n\r\n def __init__(self, coords):\r\n self.coords = coords\r\n tr = triangle2.Triangle([coords[0], coords[1], coords[3]])\r\n tr2 = triangle2.Triangle([coords[0], coords[2], coords[3]])\r\n if tr.s and tr2.s:\r\n self.a = tr.a\r\n self.c = tr.c\r\n self.e = tr.b\r\n self.d = tr2.c\r\n self.b = tr2.a\r\n self.s = tr.s + tr2.s\r\n self.calc_r()\r\n self.calc_R()\r\n\r\n def calc_r(self):\r\n self.r = self.a / 2 if self.a == self.b == self.c else 0\r\n\r\n def calc_R(self):\r\n self.R = sqrt(self.a ** 2 + self.b ** 2) / 2","repo_name":"mgttttt/python_labs","sub_path":"lab13/figures_calculations2/rectangle2.py","file_name":"rectangle2.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21747409394","text":"from django.shortcuts import render\nfrom .training_model import result\nimport datetime\nimport json\n\n\n# Create your views here.\ndef home(request):\n date = request.GET.get(\"date\")\n image = request.GET.get(\"images\")\n date_param = 0\n if date:\n date_param = datetime.datetime.strptime(str(date), \"%Y-%m-%d\").strftime(\n \"%d/%m/%Y\"\n )\n # print(\"*********Date***********\", date_param)\n # print(\"*********Image**********\", image)\n context = {}\n if date_param and image:\n df = result(str(image), str(date_param))\n context = {\"d\": df, \"date\": date_param, \"image\": image}\n return render(request, \"home.html\", context)\n","repo_name":"prettyquail/Attendance-System","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35219408216","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass DataQualityOperator(BaseOperator):\n \"\"\"\n This Operator runs data quality checks on fact and dimension tables.\n checks if a certain column contains NULL values by counting all the rows that have 
NULL in this column. \n expected result is 0.\n\n \"\"\"\n\n ui_color = '#89DA59'\n\n @apply_defaults\n def __init__(self,\n # Define operators params\n redshift_conn_id=\"\",\n tables=\"\",\n *args, **kwargs):\n\n super(DataQualityOperator, self).__init__(*args, **kwargs)\n # Map params \n self.redshift_conn_id = redshift_conn_id\n self.tables = tables\n\n def execute(self, context):\n self.log.info('DataQualityOperator not implemented yet')\n # getting redshift connection\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id) \n \n for table in self.tables:\n records = redshift.get_records(f\"SELECT COUNT(*) FROM {table}\") \n \n if len(records) < 1 or len(records[0]) < 1:\n raise ValueError(f\"Data quality check failed. {table} returned no results\")\n \n num_records = records[0][0]\n\n if num_records < 1:\n raise ValueError(f\"Data quality check failed. No records present in table {table} [0 rows]\")\n \n self.log.info(f\"Passed data quality record count on table {table} check with {num_records} records\")","repo_name":"saramostafaali/Data-Pipelines-with-Airflow-","sub_path":"plugins/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6577103200","text":"import numpy as np\nimport common.transformations.orientation as orient\nimport math\n\nFULL_FRAME_SIZE = (1164, 874)\nW, H = FULL_FRAME_SIZE[0], FULL_FRAME_SIZE[1]\neon_focal_length = FOCAL = 910.0\n\n# aka 'K' aka camera_frame_from_view_frame\neon_intrinsics = np.array([\n [FOCAL, 0., W/2.],\n [ 0., FOCAL, H/2.],\n [ 0., 0., 1.]])\n\n\nleon_dcam_intrinsics = np.array([\n [650, 0, 816//2],\n [ 0, 650, 612//2],\n [ 0, 0, 1]])\n\neon_dcam_intrinsics = np.array([\n [860, 0, 1152//2],\n [ 0, 860, 864//2],\n [ 0, 0, 1]])\n\n# aka 'K_inv' aka view_frame_from_camera_frame\neon_intrinsics_inv = np.linalg.inv(eon_intrinsics)\n\n\n# device/mesh : x->forward, y-> right, z->down\n# view : x->right, y->down, z->forward\ndevice_frame_from_view_frame = np.array([\n [ 0., 0., 1.],\n [ 1., 0., 0.],\n [ 0., 1., 0.]\n])\nview_frame_from_device_frame = device_frame_from_view_frame.T\n\n\ndef get_calib_from_vp(vp):\n vp_norm = normalize(vp)\n yaw_calib = np.arctan(vp_norm[0])\n pitch_calib = -np.arctan(vp_norm[1]*np.cos(yaw_calib))\n roll_calib = 0\n return roll_calib, pitch_calib, yaw_calib\n\n\n# aka 'extrinsic_matrix'\n# road : x->forward, y -> left, z->up\ndef get_view_frame_from_road_frame(roll, pitch, yaw, height):\n device_from_road = orient.rot_from_euler([roll, pitch, yaw]).dot(np.diag([1, -1, -1]))\n view_from_road = view_frame_from_device_frame.dot(device_from_road)\n return np.hstack((view_from_road, [[0], [height], [0]]))\n\n\ndef vp_from_ke(m):\n \"\"\"\n Computes the vanishing point from the product of the intrinsic and extrinsic\n matrices C = KE.\n\n The vanishing point is defined as lim x->infinity C (x, 0, 0, 1).T\n \"\"\"\n return (m[0, 0]/m[2,0], m[1,0]/m[2,0])\n\n\ndef vp_from_rpy(rpy):\n e = get_view_frame_from_road_frame(rpy[0], rpy[1], rpy[2], 1.22)\n ke = np.dot(eon_intrinsics, e)\n return vp_from_ke(ke)\n\n\ndef roll_from_ke(m):\n # note: different from calibration.h/RollAnglefromKE: i think that one's just wrong\n return np.arctan2(-(m[1, 0] - m[1, 1] * m[2, 0] / m[2, 1]),\n -(m[0, 0] - m[0, 1] * m[2, 0] / m[2, 1]))\n\n\ndef normalize(img_pts, intrinsics=eon_intrinsics):\n # normalizes image coordinates\n # accepts single pt or array of pts\n intrinsics_inv = 
np.linalg.inv(intrinsics)\n img_pts = np.array(img_pts)\n input_shape = img_pts.shape\n img_pts = np.atleast_2d(img_pts)\n img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0],1))))\n img_pts_normalized = img_pts.dot(intrinsics_inv.T)\n img_pts_normalized[(img_pts < 0).any(axis=1)] = np.nan\n return img_pts_normalized[:,:2].reshape(input_shape)\n\n\ndef denormalize(img_pts, intrinsics=eon_intrinsics):\n # denormalizes image coordinates\n # accepts single pt or array of pts\n img_pts = np.array(img_pts)\n input_shape = img_pts.shape\n img_pts = np.atleast_2d(img_pts)\n img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0],1))))\n img_pts_denormalized = img_pts.dot(intrinsics.T)\n img_pts_denormalized[img_pts_denormalized[:,0] > W] = np.nan\n img_pts_denormalized[img_pts_denormalized[:,0] < 0] = np.nan\n img_pts_denormalized[img_pts_denormalized[:,1] > H] = np.nan\n img_pts_denormalized[img_pts_denormalized[:,1] < 0] = np.nan\n return img_pts_denormalized[:,:2].reshape(input_shape)\n\n\ndef device_from_ecef(pos_ecef, orientation_ecef, pt_ecef):\n # device from ecef frame\n # device frame is x -> forward, y-> right, z -> down\n # accepts single pt or array of pts\n input_shape = pt_ecef.shape\n pt_ecef = np.atleast_2d(pt_ecef)\n ecef_from_device_rot = orient.rotations_from_quats(orientation_ecef)\n device_from_ecef_rot = ecef_from_device_rot.T\n pt_ecef_rel = pt_ecef - pos_ecef\n pt_device = np.einsum('jk,ik->ij', device_from_ecef_rot, pt_ecef_rel)\n return pt_device.reshape(input_shape)\n\n\ndef img_from_device(pt_device):\n # img coordinates from pts in device frame\n # first transforms to view frame, then to img coords\n # accepts single pt or array of pts\n input_shape = pt_device.shape\n pt_device = np.atleast_2d(pt_device)\n pt_view = np.einsum('jk,ik->ij', view_frame_from_device_frame, pt_device)\n\n # This function should never return negative depths\n pt_view[pt_view[:,2] < 0] = np.nan\n\n pt_img = pt_view/pt_view[:,2:3]\n return pt_img.reshape(input_shape)[:,:2]\n\n\ndef get_camera_frame_from_calib_frame(camera_frame_from_road_frame):\n camera_frame_from_ground = camera_frame_from_road_frame[:, (0, 1, 3)]\n calib_frame_from_ground = np.dot(eon_intrinsics,\n get_view_frame_from_road_frame(0, 0, 0, 1.22))[:, (0, 1, 3)]\n ground_from_calib_frame = np.linalg.inv(calib_frame_from_ground)\n camera_frame_from_calib_frame = np.dot(camera_frame_from_ground, ground_from_calib_frame)\n return camera_frame_from_calib_frame\n\n\ndef pretransform_from_calib(calib):\n roll, pitch, yaw, height = calib\n view_frame_from_road_frame = get_view_frame_from_road_frame(roll, pitch, yaw, height)\n camera_frame_from_road_frame = np.dot(eon_intrinsics, view_frame_from_road_frame)\n camera_frame_from_calib_frame = get_camera_frame_from_calib_frame(camera_frame_from_road_frame)\n return np.linalg.inv(camera_frame_from_calib_frame)\n\n\ndef transform_img(base_img,\n augment_trans=np.array([0,0,0]),\n augment_eulers=np.array([0,0,0]),\n from_intr=eon_intrinsics,\n to_intr=eon_intrinsics,\n output_size=None,\n pretransform=None,\n top_hacks=False,\n yuv=False,\n alpha=1.0,\n beta=0,\n blur=0):\n import cv2 # pylint: disable=import-error\n cv2.setNumThreads(1)\n\n if yuv:\n base_img = cv2.cvtColor(base_img, cv2.COLOR_YUV2RGB_I420)\n\n size = base_img.shape[:2]\n if not output_size:\n output_size = size[::-1]\n\n cy = from_intr[1,2]\n def get_M(h=1.22):\n quadrangle = np.array([[0, cy + 20],\n [size[1]-1, cy + 20],\n [0, size[0]-1],\n [size[1]-1, size[0]-1]], dtype=np.float32)\n quadrangle_norm = 
np.hstack((normalize(quadrangle, intrinsics=from_intr), np.ones((4,1))))\n quadrangle_world = np.column_stack((h*quadrangle_norm[:,0]/quadrangle_norm[:,1],\n h*np.ones(4),\n h/quadrangle_norm[:,1]))\n rot = orient.rot_from_euler(augment_eulers)\n to_extrinsics = np.hstack((rot.T, -augment_trans[:,None]))\n to_KE = to_intr.dot(to_extrinsics)\n warped_quadrangle_full = np.einsum('jk,ik->ij', to_KE, np.hstack((quadrangle_world, np.ones((4,1)))))\n warped_quadrangle = np.column_stack((warped_quadrangle_full[:,0]/warped_quadrangle_full[:,2],\n warped_quadrangle_full[:,1]/warped_quadrangle_full[:,2])).astype(np.float32)\n M = cv2.getPerspectiveTransform(quadrangle, warped_quadrangle.astype(np.float32))\n return M\n\n M = get_M()\n if pretransform is not None:\n M = M.dot(pretransform)\n augmented_rgb = cv2.warpPerspective(base_img, M, output_size, borderMode=cv2.BORDER_REPLICATE)\n\n if top_hacks:\n cyy = int(math.ceil(to_intr[1,2]))\n M = get_M(1000)\n if pretransform is not None:\n M = M.dot(pretransform)\n augmented_rgb[:cyy] = cv2.warpPerspective(base_img, M, (output_size[0], cyy), borderMode=cv2.BORDER_REPLICATE)\n\n # brightness and contrast augment\n augmented_rgb = np.clip((float(alpha)*augmented_rgb + beta), 0, 255).astype(np.uint8)\n\n # gaussian blur\n if blur > 0:\n augmented_rgb = cv2.GaussianBlur(augmented_rgb,(blur*2+1,blur*2+1),cv2.BORDER_DEFAULT)\n\n if yuv:\n augmented_img = cv2.cvtColor(augmented_rgb, cv2.COLOR_RGB2YUV_I420)\n else:\n augmented_img = augmented_rgb\n return augmented_img\n\n\ndef yuv_crop(frame, output_size, center=None):\n # output_size in camera coordinates so u,v\n # center in array coordinates so row, column\n import cv2 # pylint: disable=import-error\n rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)\n if not center:\n center = (rgb.shape[0]/2, rgb.shape[1]/2)\n rgb_crop = rgb[center[0] - output_size[1]/2: center[0] + output_size[1]/2,\n center[1] - output_size[0]/2: center[1] + output_size[0]/2]\n return cv2.cvtColor(rgb_crop, cv2.COLOR_RGB2YUV_I420)\n","repo_name":"littlemountainman/modeld","sub_path":"common/transformations/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":8208,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"81"} +{"seq_id":"11392881083","text":"def get_size(image_path, max_width, max_height):\n \"\"\"\n loads image from the path\n resizes it and out puts width and height\n \"\"\"\n\n out_w, out_h = max_width, max_height\n\n image_fullsize = loadImage(image_path)\n w, h = image_fullsize.width, image_fullsize.height\n aspect_ratio = float(w)/float(h)\n\n if max_height * aspect_ratio <= max_width:\n out_w, out_h = int(max_height * aspect_ratio), int(max_height)\n\n else:\n out_w, out_h = int(max_width), int(max_width/aspect_ratio)\n\n return out_w, out_h\n","repo_name":"Vipul-Jirge/GitPublic","sub_path":"Generative_Art/globalModules/resize_image.py","file_name":"resize_image.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16041817183","text":"from .colour import Colour\n\nASPECT_RATIO = 16.0 / 10.0\nWIDTH = 320\nHEIGHT = int(WIDTH / ASPECT_RATIO)\n\nRED = Colour(1, 0, 0)\nGREEN = Colour(0, 1, 0)\nBLUE = Colour(0, 0, 1)\nYELLOW = Colour(1, 1, 0)\nWHITE = Colour(1, 1, 1)\nBLACK = Colour(0.001, 0, 
0)\n","repo_name":"stefangn98/py-raytracer","sub_path":"src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32198535445","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"URLs for newsletter manager.\"\"\"\r\n\r\nfrom django.urls import path\r\nfrom django.views.generic import RedirectView\r\nfrom djtito.newsletter import views\r\n\r\n\r\nurlpatterns = [\r\n path('archives//', views.archives, name='newsletter_archives'),\r\n path('archives/', views.archives, name='newsletter_archives_default'),\r\n path('manager/', views.manager, name='newsletter_manager'),\r\n # clear livewhale blurb cache via ajax post\r\n path('cache//clear/', views.clear_cache, name='clear_cache'),\r\n path('', RedirectView.as_view(url='/bridge/')),\r\n]\r\n","repo_name":"carthage-college/django-djtito","sub_path":"djtito/newsletter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"429877546","text":"# This program solves the wordle based on your inputs\n\nfrom wordle_simulator import update_lists\nfrom wordle_simulator import update_possible_words\nfrom wordle_simulator import most_common_letter\nimport random\n\ndef get_word_optimised(possible_words,correct,result):\n ignore_letter = [letter[0] for letter in result if letter[1] != 'wrong']\n #instead of just returning a random choice - looks at the letter that is most common\n #finding the places that have not been guessed correction\n positions = [0,1,2,3,4]\n if correct == []:\n 1+1#no correct letters guessed - the \"positions\" remain unchanged\n else:\n for place in correct:\n #we remove the places to look at for the correct guesses\n positions.remove(place[0])\n common_letter = most_common_letter(possible_words,ignore_letter)\n #we have the most common letter in the word (outside of correct ones)\n likely_words = [word for word in possible_words if common_letter in word]\n ignore_letter.append(common_letter)\n past_likely_words = ['']\n #just so the code runs\n #ideally want to repeat this process a few times but ignoring the correct letters and the just used most_common_letter\n #gotta check if it returns an empty result or not (it won't return an empty set though... 
gotta check if it changes from the last one)\n #repeat until it is an empty set and then take a random choice of the set just before the empty set\n while likely_words != past_likely_words:\n #runs until empty set and then prints out a random word from when the set was last not empty\n past_likely_words = likely_words\n common_letter = most_common_letter(likely_words,ignore_letter)\n ignore_letter.append(common_letter)\n likely_words = [word for word in likely_words if common_letter in word]\n #need to make it so that when we rerun it doesn't query the same letter\n #now we select one word of the likely_words\n return random.choice(past_likely_words)\n\ndef convert_result(guess_word,result_word):\n count = 0\n # format - letter, status for that location\n result = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]\n for letter in guess_word:\n result[count][0] = letter\n if result_word[count] == 'g':\n result[count][1] = 'correct'\n elif result_word[count] == 'y':\n result[count][1] = 'nearly_correct'\n elif result_word[count == 'b']:\n result[count][1] = 'wrong'\n count += 1\n return result\n\n# setting up possible_words\npossible_words = []\n# reading in list of 5 letter words\nwith open('fivewords.txt') as f:\n for line in f:\n possible_words.append(line.strip())\n\nguessed = 0\nwhile guessed == 0:\n guess_word = input('Enter your 5 letter guess word')\n while len(guess_word) != 5:\n guess_word = input('Please enter a 5 letter guess word')\n print('What was the result for the word?')\n result_word = input('\"g\" for green, \"b\" for black, \"y\" for yellow')\n if result_word == 'ggggg':\n print('Congratulations!')\n guessed = 1\n exit()\n result = convert_result(guess_word,result_word)\n correct, nearly_correct, wrong = update_lists(result)\n possible_words = update_possible_words(correct, nearly_correct, wrong, possible_words)\n if len(possible_words) == 0:\n print('Something went wrong...')\n exit()\n print(f'There are {len(possible_words)} to choose from.')\n print(possible_words)\n print(f'Why not try the word {get_word_optimised(possible_words, correct, result)}?')\n\n","repo_name":"beaniedude/wordle_simulator","sub_path":"wordle_solver.py","file_name":"wordle_solver.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"75083441866","text":"number_of_snowballs = int(input())\r\n\r\nmax_weight = 0\r\nmax_time = 0\r\nmax_value = 0\r\nmax_quality = 0\r\n\r\nfor snowballs in range(number_of_snowballs):\r\n weight = int(input())\r\n time_needed = int(input())\r\n quality = int(input())\r\n snowball_value = (weight / time_needed) ** quality\r\n if snowball_value > max_value:\r\n max_weight = weight\r\n max_time = time_needed\r\n max_value = snowball_value\r\n max_quality = quality\r\nprint(f'{max_weight} : {max_time} = {int(max_value)} ({max_quality})')\r\n","repo_name":"Mitaka001/PythonFundamentalsSolutions","sub_path":"data_types_and_variables/snowballs.py","file_name":"snowballs.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39853627156","text":"from time import sleep\nfrom collections import deque\n\n\nfrom ..util.common import timer\nfrom .DHTSender import DHTSender\nfrom ..util.common import random_nid, get_neighbor\nfrom ..util.common import ran_str\nfrom ..config import Config\n\n# DHTClient 继承于 DHTSender类\nclass DHTClient(DHTSender):\n '''\n DHT网络的客户端,用于模拟KRPC协议\n '''\n def 
__init__(self, bind_ip, bind_port):\n DHTSender.__init__(self, bind_ip, bind_port)\n # 绑定ip和port\n self.bind_ip = bind_ip\n self.bind_port = bind_port\n # 随机生成nid\n self.nid = random_nid()\n # 双端队列,用来装扩散路由时返回的节点\n self.nodes = deque(maxlen=Config.MAX_NODE_SIZE)\n # 守护进程\n self.setDaemon(True)\n # 定时器,定时重新加入DHT网络\n timer(Config.REJOIN_DHT_INTERVAL, self.rejoin_dht)\n\n #加入DHT网络,进行路由扩散\n def join_dht(self):\n for address in Config.BOOTSTRAP_NODES:\n self.send_find_node(address)\n\n # 定时检查nodes长度,如果长度为0则重新加入DHT网络\n def rejoin_dht(self):\n print('重新加入DHT网络')\n if len(self.nodes) == 0:\n self.join_dht()\n timer(Config.REJOIN_DHT_INTERVAL, self.rejoin_dht())\n\n # 模拟KRPC协议中的find_node请求模拟\n def send_find_node(self, address, tar_nid=None):\n # 判断是进行路由扩散还是加入DHT\n nid = get_neighbor(tar_nid, self.nid) if tar_nid else self.nid\n #nid = self.nid\n # 随机生成长度为2的tid\n tid = ran_str(2)\n msg = {\n 'tid': tid,\n 'y': 'q',\n 'q': 'find_node',\n 'id': nid,\n 'target': random_nid()\n }\n # 发送find_node krpc\n print('发送find_node请求 msg: {0} \\n to address: {1}'.format(msg, address))\n self.send_krpc(msg, address)\n\n # 自动向双端队列中的节点发送find_node请求\n def auto_send_find_node(self):\n while True:\n try:\n # 从双端队列中取出node\n node = self.nodes.popleft()\n print('取出node: {0}, {1}, {2}'.format(node.nid, node.ip, node.port))\n address = (node.ip, node.port)\n # 向取出的node请求寻找新的节点\n self.send_find_node(address, node.nid)\n except IndexError:\n pass\n # 发送间隔\n sleep(1.0 / Config.MAX_NODE_SIZE)\n\n\n\n\n","repo_name":"zxr111/DHT_Search","sub_path":"spider/dht/DHTClient.py","file_name":"DHTClient.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"18769544519","text":"import network\nimport threading\nfrom vidstream import AudioSender, AudioReceiver, ScreenShareClient, CameraClient, StreamingServer\n\nserver = StreamingServer(network.local_ip_address, network.steaming_port)\nreceiver = AudioReceiver(network.local_ip_address, network.receiving_port)\n\n\ndef start_listening():\n t1 = threading.Thread(target=server.start_server)\n t2 = threading.Thread(target=receiver.start_server)\n t1.start()\n t2.start()\n\n\ndef start_camera_steam():\n camera_client = CameraClient(network.target_ip, network.target_port)\n t3 = threading.Thread(target=camera_client.start_stream)\n t3.start()\n\n\ndef start_screen_sharing():\n screen_client = ScreenShareClient(network.target_ip, network.target_port)\n t4 = threading.Thread(target=screen_client.start_stream)\n t4.start()\n\n\ndef start_audio_stream():\n audio_sender = AudioSender(network.target_ip, network.audio_port)\n t5 = threading.Thread(target=audio_sender.start_stream)\n t5.start()\n\n","repo_name":"thanathasCh/ZoomClone","sub_path":"streaming.py","file_name":"streaming.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37721675895","text":"#!/usr/bin/env python\n\nimport roslib\nroslib.load_manifest('misc_scripts')\n\nimport rospy\nfrom nav_msgs.msg import Odometry\nimport tf\n\npub = None\n\n\ndef callback(msg):\n br = tf.TransformBroadcaster()\n pose = msg.pose.pose\n br.sendTransform((pose.position.x, pose.position.y, 0),\n (pose.orientation.x, pose.orientation.y,\n pose.orientation.z, pose.orientation.w),\n msg.header.stamp,\n \"base_footprint\",\n \"odom\")\n\n\ndef main():\n global pub\n rospy.init_node('odom2tf')\n\n rospy.Subscriber(\"odom\", Odometry, callback)\n\n 
rospy.spin()\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"wjwwood/3d_teleop","sub_path":"misc_scripts/nodes/odom2tf.py","file_name":"odom2tf.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3209919995","text":"\"\"\"\nConstants.\n\"\"\"\n\n\nclass Constants:\n \"\"\"Fundamental constants,\n including mass, hbar, e ... etc\n\n Attributes:\n m [float]: mass\n hbar [float]: Reduced Planck constant\n e [float]: Charge\n x0 [float]: Initial position\n L [float]: Length of the box\n N [int]: Number of spatial steps\n dx [float]: Space stepsize\n dt [float]: Time stepsize\n _scale [float]: multiply the potential by a certain amount.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the constants.\n \"\"\"\n\n self.m = 1. # Mass\n self.hbar = 1. # Reduced Planck constant\n self.e = 1. # Charge\n\n self.x0 = -0.5 # Initial position\n self.L = 1. # The Length of the box\n self.N = 512 # Number of spatial steps\n self.dx = self.L/self.N # Space stepsize\n self.dt = 0.00001 # Time stepsize\n\n self._scale = (128/self.N)*5e5\n\n def _get_constants(self):\n \"\"\"\n Return constants.\n \"\"\"\n return self.m, self.hbar, self.e, self.L, self.N, self.dx, self.dt\n","repo_name":"marl0ny/QM-Simulator-1D","sub_path":"qm/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"81"} +{"seq_id":"13697232013","text":"import Tkinter as tk\nimport tkFileDialog\nimport tkMessageBox\nimport tkFont\n\nfrom .equipment import CAMERAS\nfrom .plomo import manipulate_files\n\n\nclass PlomoApp(tk.Frame):\n def __init__(self, master=None, path=None, camera=None):\n if master is None:\n master = tk.Tk()\n master.minsize(400, 100)\n tk.Frame.__init__(self, master)\n self.grid()\n self.customFont = tkFont.Font(weight=tkFont.BOLD)\n self.path = tk.StringVar()\n self.camera = tk.StringVar()\n self.create_widgets()\n\n if path:\n self.path.set(path)\n if camera:\n self.camera.set(camera)\n\n def create_widgets(self):\n self.path_title_label = tk.Label(self, text='Directory:',\n font=self.customFont)\n self.path_title_label.grid(row=0, column=0, sticky='W')\n\n self.select_path_button = tk.Button(self, text='Select directory',\n command=self.ask_directory)\n self.select_path_button.grid(row=0, column=1)\n\n self.path_label = tk.Label(self, textvariable=self.path)\n self.path_label.grid(row=1, column=0, columnspan=4)\n\n self.camera_title_label = tk.Label(self, text='Camera:',\n font=self.customFont)\n self.camera_title_label.grid(row=2, column=0, sticky='W')\n\n options = CAMERAS.keys()\n options.sort()\n self.camera_option = tk.OptionMenu(self, self.camera, *options)\n self.camera_option.grid(row=2, column=1)\n\n self.save_button = tk.Button(self, text='Save', command=self.save)\n self.save_button.grid(row=2, column=3, sticky='E')\n\n self.quit_button = tk.Button(self, text='Quit', command=self.quit)\n self.quit_button.grid(row=2, column=4, sticky='W')\n\n def ask_directory(self):\n title = 'Select the folder containing your photos'\n self.path.set(tkFileDialog.askdirectory(mustexist=True, title=title))\n\n def save(self):\n if not self.path.get() or not self.camera.get():\n tkMessageBox.showwarning('Plomo',\n 'You must select directory and a camera')\n return\n\n if manipulate_files(self.path.get(), CAMERAS[self.camera.get()]):\n 
tkMessageBox.showinfo('Plomo', 'Successfully saved')\n else:\n tkMessageBox.showwarning('Plomo', 'Could not save Exif data')\n","repo_name":"relekang/plomo","sub_path":"plomo/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"36661215854","text":"import pandas as pd\nimport numpy as np\n\nclass MonteCarloSim():\n def __init__(self, numiter=5000):\n self.counter = numiter\n\n def fit(self, df):\n \"\"\" Compute the Monte Carlo Simulator \"\"\"\n # df contains all the search results, including hidden fields\n # but the requested are saved as self.feature_variables\n df.sort_values(by=['date'])\n df.set_index('date', inplace=True)\n codes = df.columns\n days = df.shape[0] / len(codes)\n\n daily_ret = df.pct_change()\n annual_ret = daily_ret.mean() * days\n daily_cov = daily_ret.cov()\n annual_cov = daily_cov * days\n\n port_ret = []\n port_risk = []\n port_weights = []\n port_sharpe = []\n\n for _ in range(self.counter):\n weights = np.random.random(len(codes))\n weights /= np.sum(weights)\n\n returns = np.dot(weights, annual_ret)\n risk = np.sqrt(np.dot(weights.T, np.dot(annual_cov, weights)))\n\n port_ret.append(returns)\n port_risk.append(risk)\n port_weights.append(weights)\n port_sharpe.append(returns/risk)\n\n portfolio = { 'Sharpe': port_sharpe, 'Returns' : port_ret, 'Risk': port_risk }\n for i, s in enumerate(codes):\n portfolio[s] = [weight[i] for weight in port_weights]\n output_df = pd.DataFrame(portfolio)\n output_df = output_df[['Sharpe', 'Returns', 'Risk'] + [s for s in codes]]\n return output_df","repo_name":"woococo/stock","sub_path":"monte_sim.py","file_name":"monte_sim.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44508596830","text":"import re\n\ndef str_add(numbers):\n return_sum = 0\n return_assert = []\n numList = re.findall(r'-?\\d+',numbers)\n\n for number in numList:\n if number[0] == '-':\n return_assert.append(number)\n elif int(number) > 1000:\n continue\n else:\n return_sum += int(number)\n if return_assert:\n raise AssertionError(\"negatives not allowed\", return_assert)\n return return_sum","repo_name":"wckdtrb/tdd_katas","sub_path":"katas/kata1_string_calc.py","file_name":"kata1_string_calc.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25047380346","text":"from django_cron import CronJobBase, Schedule\nfrom django.utils import timezone\nfrom django.conf import settings\nimport datetime\nfrom hrm.models import EmployeeContract\n\n\nclass EmployeeContractCronJob(CronJobBase):\n RUN_EVERY_MINS = 1\n\n schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\n code = 'hrm.employee_contract_cron_job'\n\n def do(self):\n print(\"Checking contract\")\n print(timezone.now())\n print(\"===========================================================\")\n warning = timezone.now() + datetime.timedelta(days=settings.MINIERP_SETTINGS['HRM']['recontract_warning'])\n contract_list = EmployeeContract.objects.all().filter(end_date__lte=warning.date())\n for contract in contract_list:\n contract.contract_status = contract.check_contract_status()\n contract.save(update_fields=['contract_status'])\n print(\"===========================================================\")\n print(\"DONE\")\n 
print(\"===========================================================\")\n","repo_name":"andrewidya/littleerp","sub_path":"hrm/cron_job.py","file_name":"cron_job.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41306690009","text":"from smartystreets_python_sdk import StaticCredentials, exceptions, ClientBuilder\nfrom smartystreets_python_sdk.us_street import Lookup as StreetLookup\n\nclass ValidationServiceImple(object):\n def validAddress(self, addressNo,streetName1,streetName2, city, region, countryCode, postalCode):\n \"\"\"\n valid address\n \"\"\"\n auth_id = \"5b91521a-637e-a34b-7578-c5db463e6fa6\"\n auth_token = \"QEww5NZby77yZQhVKjhK\"\n\n credentials = StaticCredentials(auth_id, auth_token)\n client = ClientBuilder(credentials).with_licenses([\"us-core-cloud\"]).build_us_street_api_client()\n\n lookup = StreetLookup()\n lookup.addressee = \"Tom\"\n lookup.street = streetName1\n lookup.street2 = streetName2\n lookup.city = city\n lookup.state = region\n lookup.zipcode = postalCode\n lookup.candidates = 3\n lookup.match = \"invalid\" # \"invalid\" is the most permissive match,\n\n try:\n client.send_lookup(lookup)\n except exceptions.SmartyException as err:\n print(err)\n return\n\n result = lookup.result\n first_candidate = result[0]\n if first_candidate.components.zipcode:\n return \"Valid\"\n else:\n return \"The address is not valid\"\n","repo_name":"yingjinsun/cc-sprint-users","sub_path":"UsersModule/infos/services/ValidationService.py","file_name":"ValidationService.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17092767813","text":"import json\nimport os\n\nimport pytest\n\ntest_file = os.path.join(\"test_data\", \"test_data1.csv\")\nwith open(os.path.join(\"tests\", \"b64.txt\"), \"r\") as fobj:\n txt = fobj.read()\n\n\ndef test_im_to_b64():\n from GUI_client import image_to_b64\n answer = image_to_b64(os.path.join(\"tests\", \"image.png\"))\n assert answer == txt\n\n\ndef test_data_to_fig():\n from GUI_client import data_to_fig, preprocess_data\n data = preprocess_data(test_file)\n data_to_fig(data, \"temp.png\")\n assert os.path.isfile(\"temp.png\")\n os.remove(\"temp.png\")\n\n\ndef test_photometrics():\n from GUI_client import photometrics_from_csv\n ans_photo_data, ans_metrics = photometrics_from_csv(test_file)\n for key, value in ans_metrics.items():\n if isinstance(value, tuple):\n ans_metrics[key] = list(value)\n with open(os.path.join(\"tests\", \"test_data1.json\"), \"r\") as jobj:\n expected_metrics = json.load(jobj)\n expected_metrics[\"filename\"] = os.path.basename(test_file)\n assert ans_metrics == expected_metrics\n assert not os.path.isfile(\"temp.png\")\n assert ans_photo_data == txt # ahah! 
compared the string instead!\n\n\n@pytest.mark.parametrize(\"pid, name, image, hr, expected\", [\n (\"201\", \"\", \"\", \"\", {\"patient_id\": \"201\"}),\n (\"\", \"\", \"\", \"\", False),\n (\"\", \"Ann\", \"image\", \"72\", False),\n (\"201\", \"Ann\", \"image\", \"72\", {\"patient_id\": \"201\", \"patient_name\": \"Ann\",\n \"image\": [\"image\"], \"hr\": \"72\"})\n])\ndef test_create_out(pid, name, image, hr, expected):\n from GUI_client import create_output\n answer = create_output(pid, name, image, hr)\n assert answer == expected\n\n\ndef test_html_search():\n from server import render_image, app\n from GUI_client import img_from_html\n with app.app_context():\n my_html = render_image(txt, \"Ann\")\n answer = img_from_html(my_html)\n assert answer == txt\n bad_answer = img_from_html(\"\")\n assert bad_answer == \"\"\n","repo_name":"Aaronearlerichardson/Server_gui_client","sub_path":"tests/client_test.py","file_name":"client_test.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34180406564","text":"import RPi.GPIO as gpio\nfrom collections import namedtuple\nimport colorsys\nimport math\nimport time\nfrom util import frange\nfrom util import sign\nimport threading\n\nclass LED:\n\n def __init__(self, r, g, b, brightness, pins):\n self.brightness = brightness\n self.rgb = [r, g, b]\n self.pins = pins\n self.initialized = True\n self.shiftSteps = 50\n self.lock = threading.Lock()\n\n # Setup all the LED colors with an initial\n # duty cycle of 100 which is off\n gpio.setup(pins[0],gpio.OUT)\n gpio.setup(pins[1],gpio.OUT)\n gpio.setup(pins[2],gpio.OUT)\n gpio.setup(pins[3], gpio.OUT)\n\n gpio.output(pins[3],1)\n\n \n Freq = 120 #Hz\n RED = gpio.PWM(pins[0], Freq)\n RED.start(100)\n GREEN = gpio.PWM(pins[1], Freq)\n GREEN.start(100)\n BLUE = gpio.PWM(pins[2], Freq)\n BLUE.start(100)\n\n PinObjects = namedtuple('PinObjects', ['RED', 'GREEN','BLUE'])\n self.pinObjs = PinObjects(RED,GREEN,BLUE)\n\n def reInit(self):\n if not self.initialized:\n # Setup all the LED colors with an initial\n # duty cycle of 100 which is off\n gpio.setup(self.pins[0],gpio.OUT)\n gpio.setup(self.pins[1],gpio.OUT)\n gpio.setup(self.pins[2],gpio.OUT)\n gpio.setup(self.pins[3], gpio.OUT)\n\n gpio.output(self.pins[3],1)\n \n Freq = 120 #Hz\n RED = gpio.PWM(self.pins[0], Freq)\n RED.start(100)\n GREEN = gpio.PWM(self.pins[1], Freq)\n GREEN.start(100)\n BLUE = gpio.PWM(self.pins[2], Freq)\n BLUE.start(100)\n\n PinObjects = namedtuple('PinObjects', ['RED', 'GREEN','BLUE'])\n self.pinObjs = PinObjects(RED,GREEN,BLUE)\n\n self.on = True\n self.setColor(self.rgb)\n\n def setBrightness(self,newBrightness):\n self.brightness = newBrightness\n\n\n def setPins(self, newPins):\n self.pins = newPins\n\n def setColor(self, newColor):\n self.rgb = newColor\n self.pinObjs.RED.ChangeDutyCycle(100-(self.rgb[0]*self.brightness))\n self.pinObjs.GREEN.ChangeDutyCycle(100-(self.rgb[1]*self.brightness))\n self.pinObjs.BLUE.ChangeDutyCycle(100-(self.rgb[2]*self.brightness))\n\n def shiftToColor(self, newColor):\n curValues = self.rgb\n\n diffs = [abs(newColor[i] - curValues[i]) for i in range(3)]\n\n for i in range(len(diffs)):\n diffs[i] = round(diffs[i]*1000)/1000\n\n shiftVals = [0,0,0]\n for c in range(3):\n if(diffs[c] != 0):\n shiftVals[c] = [i for i in frange(curValues[c], newColor[c], diffs[c]/self.shiftSteps)]\n else:\n shiftVals[c] = [self.rgb[c]] * (self.shiftSteps+1)\n\n shiftr = shiftVals[0]\n shiftg = shiftVals[1]\n 
shiftb = shiftVals[2] \n\n for s in range(self.shiftSteps+1):\n shiftr[s] = round(shiftr[s]*1000)/1000\n shiftg[s] = round(shiftg[s]*1000)/1000\n shiftb[s] = round(shiftb[s]*1000)/1000\n\n self.setColor([shiftr[s], shiftg[s], shiftb[s]])\n time.sleep(.003)\n\n \n def cleanup(self):\n gpio.cleanup(self.pins)\n\n\nclass DistanceSensor:\n\n def __init__(self,trigPin, echoPin):\n self.triggerPin = trigPin\n self.echoPin = echoPin\n gpio.setup(self.triggerPin, gpio.OUT)\n gpio.setup(self.echoPin, gpio.IN)\n self.lock = threading.Lock()\n\n def distance(self):\n # set Trigger to HIGH\n gpio.output(self.triggerPin, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n gpio.output(self.triggerPin, False)\n\n StartTime = time.time()\n StopTime = time.time()\n\n # save StartTime\n while gpio.input(self.echoPin) == 0:\n StartTime = time.time()\n\n # save time of arrival\n while gpio.input(self.echoPin) == 1:\n StopTime = time.time()\n\n TimeElapsed = StopTime - StartTime\n distance = (TimeElapsed * 34300) / 2\n\n return distance\n\n def cleanup(self):\n gpio.cleanup([self.echoPin,self.triggerPin])\n\n\n#","repo_name":"nmcglohon/mini-rover","sub_path":"components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23419109811","text":"from django.shortcuts import render\nfrom django.http.response import JsonResponse # HttpResponse subclass that helps create a JSON encoded response\nfrom rest_framework.parsers import JSONParser # parser class from rest framework used to accept JSON request\nfrom rest_framework import status # named const from rest framework used to show http response status code from server after a client request\n\nfrom countries.models import Countries\nfrom countries.serializer import CountriesSerializer\nfrom rest_framework.decorators import api_view # api wrapper from rest framework used to work with function based views\n # helps recieve request instances inside views\n# Create your views here.\n\n@api_view(['GET','POST'])\ndef countriesList(request):\n if request.method == 'GET':\n countries = Countries.objects.all()\n \n name = request.GET.get('name', None)\n if name is not None:\n countries = countries.filter(name_icontains = name)\n\n countriesSerial = CountriesSerializer(countries, many = True)\n return JsonResponse(countriesSerial.data, safe = False)\n\n elif request.method == 'POST':\n countriesData = JSONParser().parse(request)\n print(countriesData)\n countriesSerial = CountriesSerializer(data = countriesData)\n if countriesSerial.is_valid():\n countriesSerial.save()\n return JsonResponse(countriesSerial.data, status = status.HTTP_201_CREATED)\n return JsonResponse(countriesSerial.errors, status = status.HTTP_404_BAD_REQUEST)\n\n\n\n@api_view(['GET','PUT','DELETE'])\n\ndef countriesDetails(request, pk):\n try:\n countries = Countries.objects.get(pk = pk)\n except Countries.DoesNotExist:\n return JsonResponse({\n 'message': 'Country does not exist'\n }, status = status.HTTP_404_NOT_FOUND)\n \n if request.method == 'GET':\n countriesSerial = CountriesSerializer(countries)\n return JsonResponse(countriesSerial.data)\n \n elif request.method == 'PUT':\n countriesData = JSONParser().parse(request)\n countriesSerial = CountriesSerializer(countries, data = contriesData)\n if contriesSerial.is_valid():\n contriesSerial.save()\n return JsonResponse(countriesSerial.data)\n return JsonResponse(countriesSerial.errors, status = 
status.HTTP_404_BAD_REQUEST)\n \n elif request.method == 'DELETE':\n countries.delete()\n return JsonResponse({\n 'message': 'Country deleted successfully'\n }, status = status.HTTP_400_BAD_REQUEST)\n","repo_name":"vnay-krshn/REST-API-training","sub_path":"countries/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72086866504","text":"from tkinter import *\nfrom tkinter import PhotoImage\nfrom PIL import ImageTk\nfrom tkinter import messagebox\nimport socket\nimport dangnhap\nfrom backend.dl_giangvien import tengv_email\nfrom send_message import gui_ma_xn\nfrom backend.xacthuc import kt_ma_xacnhan,xoa_ma_xacnhan\nfrom firebase_admin import credentials\nimport firebase_admin\nfrom firebase_admin import auth\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nimport threading\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\n\ndef main():\n\n\n\n def gui_ma_xacnhan():\n if data_e.get() == \"\":\n messagebox.showerror(\"Thông báo\",\"Vui lòng nhập email\")\n elif tengv_email(data_e.get()) == \"\":\n messagebox.showerror(\"Thông báo\",\"Email chưa đăng ký tài khoản\")\n else:\n gui_ma_xn(data_e.get())\n lbem.config(text=\"Mã\")\n txtEmail.config(textvariable=data_ma)\n btn.config(command=xacnhanma)\n def xacnhanma():\n if len(data_ma.get()) != 4:\n messagebox.showerror(\"Thông báo\" , \"Mã xác nhận có 4 chữ số\")\n elif kt_ma_xacnhan(data_e.get(),data_ma.get()) == \"\" :\n messagebox.showerror(\"Thông báo\",\"Mã xác nhận không đúng\")\n else:\n lb_frame.place_forget()\n btn.config(command=capnhatmatkhau)\n lbem.config(text=\"Mật khẩu\")\n txtEmail.config(textvariable=data_matkhau)\n \n def truycapwweb(email,passw):\n # cred = credentials.Certificate(\"facerecognition.json\")\n try:\n firebase_admin.get_app()\n except:\n cred = credentials.Certificate(\"facerecognition.json\")\n firebase_admin.initialize_app(cred)\n link= auth.generate_password_reset_link(email)\n options = Options()\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\n # driver=webdriver.Chrome(executable_path=r'C:/Program Files/Google/Chrome/Application/chrome.exe',chrome_options=options)\n driver = webdriver.Chrome(ChromeDriverManager().install())\n driver.get(link)\n\n WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, \"/html/body/div/div/form/div[2]/div/div[1]/input\"))).send_keys(passw)\n WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, \"/html/body/div/div/form/div[3]/div/button\"))).click()\n driver.quit()\n def capnhatmatkhau():\n if data_matkhau.get()==\"\" and data_nhaplai.get()==\"\":\n messagebox.showerror(\"Thông báo\",\"Nhập đầy đủ dữ liệu\")\n elif data_matkhau.get() != data_nhaplai.get():\n messagebox.showerror(\"Thông báo\",\"Mật khẩu không khớp\")\n elif len(data_matkhau.get()) <6 and len(data_matkhau.get())>10:\n messagebox.showerror(\"Thông báo\",\"Mật khẩu phải từ 6 đến 10 kí tự\")\n else:\n truycapwweb(data_e.get(),data_matkhau.get())\n messagebox.showinfo(\"thông báo\",\"Đã cập nhật mật khẩu\")\n threading.Thread(target=xoa_ma_xacnhan,args=(data_e.get(),)).start()\n trove()\n \n\n def trove():\n win.destroy()\n dangnhap.main()\n\n win=Tk()\n 
win.geometry(\"600x600+400+100\")\n win.resizable(False,False)\n win.iconbitmap(r\"img/iconphanmem.ico\")\n win.config(bg=\"white\")\n win.title(\"Quên mật khẩu\")\n img_bg=ImageTk.PhotoImage(file=\"img/bg_laylaimatkhau.png\")\n img_btn=ImageTk.PhotoImage(file=f\"img/btn_xacnhan.png\")\n ing_btntrolai=ImageTk.PhotoImage(file=\"img/trove_bgdoimatkhau.png\")\n bg_an=ImageTk.PhotoImage(file=\"img/frame_bg.png\")\n\n bg=Canvas(win,width=600,height=600,bg=\"white\")\n bg.pack(side=\"left\",padx=0)\n anhnen=bg.create_image(300,300,image=img_bg)\n ten_thiet_bi = socket.gethostname()\n \n data_e=StringVar()\n data_ma=StringVar()\n data_matkhau=StringVar()\n data_nhaplai=StringVar()\n\n \n txtEmail=Entry(bg,width=23,font=(\"Baloo Tamma 2 Medium\",12),bg=\"white\",bd=0,textvariable=data_e)\n txtEmail.place(x=243,y=228)\n txtnhaplai=Entry(bg,width=23,font=(\"Baloo Tamma 2 Medium\",12),bg=\"white\",bd=0,textvariable=data_nhaplai)\n txtnhaplai.place(x=243,y=287)\n \n lbem=Label(bg,text=\"Email\",font=(\"Baloo Tamma 2 Medium\",14),width=8,fg=\"#5F1965\",justify=\"center\",bg=\"#F9D9D4\")\n lbem.place(x=148,y=224)\n \n lbnhaplai=Label(bg,text=\"Nhập lại\",font=(\"Baloo Tamma 2 Medium\",14),fg=\"#5F1965\",bg=\"#F9D9D4\")\n lbnhaplai.place(x=151,y=281)\n \n lb_frame = Label(bg,image=bg_an,bd=0,borderwidth=0, highlightthickness=0)\n lb_frame.place(x=134,y=272)\n \n btn=Button(bg,image=img_btn,bd=0,borderwidth=0, highlightthickness=0,activebackground=\"#BFAAE5\" ,command=gui_ma_xacnhan)\n btn.place(x=231,y=494)\n\n win.mainloop()\n\nif __name__ == '__main__':\n main()","repo_name":"HUYTIEUQUY/face_reconition","sub_path":"quenmatkhau.py","file_name":"quenmatkhau.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28983374986","text":"from django.urls import path\nfrom blogapp.views import *\n\n\n\nurlpatterns = [\n path(\"\", inicio, name = \"inicio\"),\n path(\"crypto/\", cryptos, name = \"crypto\"),\n path(\"acciones/\", acciones, name = \"acciones\"),\n path(\"formularioposteos/\", formulario_posteos, name = \"posteos\"),\n\n\n]\n","repo_name":"cerisso/blogapp_de_coder","sub_path":"blogapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71082971146","text":"# Dylan Andrews, dmandrew@usc.edu\n# ITP 115, Fall 2020\n# Assignment 5, part 1\n# Description:\n# This program runs a word jumble game where a user tries to guess a word that is jumbled from a list\nimport random\n\ndef main():\n # original word list\n wordList = ['penguin', 'gorilla', 'kangaroo', 'lion', 'giraffe', 'elephant']\n\n # program chooses a word to be jumbled, turns it into a list, creates a list for jumbled letters\n word = random.choice(wordList)\n wordToList = list(word)\n jumbledList = []\n letterCounter = 0\n\n # fills in a new list to be turned into the jumbled word\n while letterCounter < len(word):\n randLetter = random.choice(wordToList)\n jumbledList.append(randLetter)\n wordToList.remove(randLetter)\n letterCounter += 1\n\n\n # turns the jumbled list into a string\n jumbledWord = \"\".join(jumbledList)\n # prints the jumbled word and prompts use to guess, initializes the counter at 1\n print(\"The jumbled word is \\\"\" + jumbledWord + \"\\\"\\n\")\n guess = input(\"Please enter your guess: \")\n counter = 1\n hint = \"i\"\n\n # prompts user to repeat guess if they don't get it right, and after 
two tries they will be asked if they want\n # a hint\n while guess != word:\n if counter >= 2:\n hint = input(\"Would you like a hint? (y/n): \")\n while hint != \"Y\" and hint != \"y\" and hint != \"N\" and hint != \"n\":\n hint = input(\"Please enter y or n: \")\n if hint == \"y\" or hint == \"Y\":\n if word == \"penguin\":\n print(\"This animal is a bird that can't fly\")\n elif word == \"gorilla\":\n print(\"This animal is very big and hangs out in tropical forests\")\n elif word == \"kangaroo\":\n print(\"This animal loves to hop around\")\n elif word == \"lion\":\n print(\"This animal is often referred to as the king of the jungle\")\n elif word == \"giraffe\":\n print(\"This animal has a very long neck\")\n elif word == \"elephant\":\n print(\"This animal has a very long nose\")\n print(\"Try again\")\n counter += 1\n guess = input(\"Please enter your guess: \")\n # score based on number of attempts and if they used a hint\n score = 100 // counter\n\n if hint == \"y\" or hint == \"Y\":\n score = score // 2\n\n # prints out number of guesses and final score\n if guess == word and counter > 1:\n print(\"\\nYou got it!\\n\\nIt took you\", str(counter), \"tries.\")\n print(\"Your Score:\", str(score) + \"/100\")\n if guess == word and counter == 1:\n print(\"\\nYou got it!\\n\\nIt took you\", str(counter), \"try.\")\n print(\"Your Score:\", str(score) + \"/100\")\n\n\nmain()","repo_name":"dandrews19/WordJumbleGame","sub_path":"ITP115_A5Pt1_Andrews_Dylan.py","file_name":"ITP115_A5Pt1_Andrews_Dylan.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37128436660","text":"import requests\nimport json\n\nBASE_URL = 'http://127.0.0.1:8000/'\nEND_POINT = 'apiview/'\nheaders = {'content-type': 'application/json'}\n\n\n# def get_data(id=None):\n# data = {}\n# if id is not None:\n# data = {'id': id}\n# json_data = json.dumps(data)\n# res = requests.get(url=BASE_URL+END_POINT, headers=headers, data=json_data)\n# print(res.json())\n#\n# get_data(1)\n\n# def post_data():\n# data = {\n# 'first_name': 'Prashant',\n# 'last_name': 'Desai',\n# 'roll_number': 105,\n# 'city': 'satara'\n# }\n# json_data = json.dumps(data)\n# res = requests.post(url=BASE_URL + END_POINT, headers=headers, data=json_data)\n# print(res.json())\n#\n#\n# post_data()\n\n# def update_data(id):\n# data = {\n# 'id': id,\n# 'first_name': 'Prashant',\n# 'last_name': 'Desai',\n# 'roll_number': 105,\n# 'city': 'pachagani'\n# }\n# json_data = json.dumps(data)\n# res = requests.put(url=BASE_URL + END_POINT, headers=headers, data=json_data)\n# print(res.json())\n#\n#\n# update_data(6)\n\ndef delete_data(id):\n data = {\n 'id': id,\n }\n json_data = json.dumps(data)\n res = requests.delete(url=BASE_URL + END_POINT, headers=headers, data=json_data)\n print(res.json())\n\n\ndelete_data(6)\n\n\n","repo_name":"prathsaw/drf","sub_path":"apiviewproject/myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4190317317","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2022/6/13 16:34\n# @Author : 郑春兴\n# @Email : zhengchunxing@aegis-data.cn\n# @File : test_robot_log.py\n# @Project : PlaywrightProject\nimport time\n\nimport allure\nimport pytest\n\nfrom common.handle_logging import test_log\nfrom pages.robotlogspage import RobotLogs\n\n@allure.feature(\"机器人日志页面\")\nclass TestRobotLog():\n 
@allure.story(\"机器人日志按条件查询并验证查询结果的用例\")\n @allure.description('''\n 1.登录成功后校验全局配置菜单是否可见\n 2.在左侧导航栏中点击“全局配置>日志记录>机器人日志”打开机器人日志管理页面,并校验指定元素是否可见\n 2.输入操作用户“唐”\n 3.打开操作类型下拉列表选择”登录“,点击搜索按钮\n 4.在查询结果列表中校验操作类型栏文本值和操作用户栏文本值,并输出测试通过或不通过日志\n ''')\n def test_robot_log(self,page,login_hospital):\n userManagementPage = login_hospital\n # 打开全局配置菜单\n userManagementPage.global_configuration.click()\n # 打开日志记录下拉菜单\n userManagementPage.logging_menu.click()\n # 点击后台日志菜单\n userManagementPage.robot_log_menu.click()\n # 实例化后台日志页\n robotLogsPage = RobotLogs(page)\n robotLogsPage.date_icon.wait_for()\n try:\n robotLogsPage.user_input.fill(\"游客\")\n #robotLogsPage.start_date_input.fill(\"2022-05-20 00:00:00\")\n #robotLogsPage.end_date_input.fill(\"2022-05-21 00:00:00\")\n robotLogsPage.operation_type_drop_list.click()\n #content_test = robotLogsPage.login_operation_type.inner_text()\n #assert content_test == \"登录\"\n robotLogsPage.login_operation_type.click()\n robotLogsPage.search_button.click()\n\n time.sleep(1)\n robotLogsPage.assertText(\"//table[@class=\\\"el-table__body\\\"]/tbody/tr/td[5]/div\",\"登录\")\n robotLogsPage.assertText(\"//table[@class=\\\"el-table__body\\\"]/tbody/tr/td[2]/div\",\"游客\")\n test_log.info(\"机器人日志查询测试通过\")\n except Exception as e:\n allure.attach(page.screenshot(), \"用例失败截图\", allure.attachment_type.PNG)\n test_log.error('机器人日志查询测试不通过')\n test_log.debug('预期结果:按照用户+日期+机器人+操作类型能查询出相关日志记录')\n test_log.exception(e)\n pytest.fail(\"预期结果:按照用户+日期+机器人+操作类型能查询出相关日志记录\")\n\n","repo_name":"JohnnyZCX/armyonlawUIAutomation","sub_path":"test/test_robot_log.py","file_name":"test_robot_log.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"74780688905","text":"\"\"\"(1) Output a menu of automotive services and the corresponding cost of each service. (2 pts) \n\nEx:\n\nDavy's auto shop services\nOil change -- $35\nTire rotation -- $19\nCar wash -- $7\nCar wax -- $12\n\"\"\"\nservices = {\n\"Oil change\" : 35.0,\n\"Tire rotation\" : 19.0,\n\"Car wash\" : 7.0,\n\"Car wax\" : 12.0,\n\"-\" : \"No service\"\n}\n\n\n# print the menu\nprint(\"Davy's auto shop services\")\nprint(\"%s -- $%d\"%(\"Oil change\",services[\"Oil change\"]))\nprint(\"%s -- $%d\"%(\"Tire rotation\",services[\"Tire rotation\"]))\nprint(\"%s -- $%d\"%(\"Car wash\",services[\"Car wash\"]))\nprint(\"%s -- $%d\"%(\"Car wax\",services[\"Car wax\"]))\n\n\"\"\"\n2) Prompt the user for two services from the menu. 
(2 pts)\n\nEx:\n\nSelect first service:\nOil change\nSelect second service:\nCar wax\n\n\"\"\"\n\nservice1 = input(\"Select first service:\")\nservice2 = input(\"Select second service:\")\nservice1_charge = 0\nservice2_charge = 0\n\nif service1 != \"-\":\n    service1_charge = services[service1] \nif service2 != \"-\":\n    service2_charge = services[service2] \n\nprint(\"Total: $%d\" % (service1_charge + service2_charge))\n","repo_name":"CISVVC/cis83-examples","sub_path":"branching/4.11/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15379956214","text":"from turnout_election_schemes.schemes.errors import FailedElectionError\nfrom .candidate import Candidate\nfrom .stv_round import Round, _Random\nfrom .vote import Vote\n\nclass SingleTransferableVoteScheme(object):\n    def __init__(self, num_vacancies, candidates, votes, random=_Random()):\n        self.num_vacancies = num_vacancies\n        self.original_candidates = candidates\n        self.remaining_candidates = candidates\n        self.votes = votes\n        self.random = random\n        self.rounds = []\n        self.success = False\n\n    def run_round(self):\n        new_round = Round(self.num_vacancies, self.remaining_candidates, self.votes, random=self.random)\n        new_round.run()\n\n        self.remaining_candidates = filter(\n            lambda candidate: not candidate in new_round.results()['excluded'].keys(),\n            self.remaining_candidates\n        )\n\n        self.rounds.append(new_round)\n\n    def latest_round(self):\n        if len(self.rounds) > 0:\n            return self.rounds[-1]\n\n    def round_results(self):\n        return map(lambda r: r.results(), self.rounds)\n\n    def completed(self):\n        if len(self.rounds) > 0:\n            return self.latest_round().all_vacancies_filled()\n\n    def final_results(self):\n        if self.completed():\n            return self.latest_round().elected_candidates()\n\n    def outcome(self):\n        return (self.success, self.round_results(), self.final_results())\n\n    def run(self):\n        self.success = True\n        try:\n            while not self.completed():\n                self.run_round()\n        except FailedElectionError:\n            self.success = False\n","repo_name":"devfort/turnout-election-schemes","sub_path":"turnout_election_schemes/schemes/singletransferablevote/scheme.py","file_name":"scheme.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37739962347","text":"\"\"\"Add file id column to params table\n\nRevision ID: b9fd6bcd50e2\nRevises: 8e44c2d99a18\nCreate Date: 2022-06-15 11:08:59.410835\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"b9fd6bcd50e2\"\ndown_revision = \"8e44c2d99a18\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column(\"parameters\", sa.Column(\"file_id\", sa.Integer(), nullable=True))\n    op.create_index(\n        op.f(\"ix_parameters_file_id\"), \"parameters\", [\"file_id\"], unique=False\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
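# A sketch of how this revision would be applied and rolled back with the
# Alembic CLI (assuming an alembic.ini configured for the target database):
#
#   alembic upgrade b9fd6bcd50e2    # or: alembic upgrade head
#   alembic downgrade 8e44c2d99a18  # revert just this revision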
###\n op.drop_index(op.f(\"ix_parameters_file_id\"), table_name=\"parameters\")\n op.drop_column(\"parameters\", \"file_id\")\n # ### end Alembic commands ###\n","repo_name":"mccoymd/DPM-SRI","sub_path":"migrations/versions/b9fd6bcd50e2_add_file_id_column_to_params_table.py","file_name":"b9fd6bcd50e2_add_file_id_column_to_params_table.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18047672080","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n heap = []\n for lst in lists:\n cur = lst\n while cur:\n heap.append(cur.val)\n cur = cur.next\n \n heapq.heapify(heap)\n head = cur = ListNode()\n while heap:\n cur.next = ListNode(heapq.heappop(heap))\n cur = cur.next\n \n return head.next\n","repo_name":"Mussie7/A2SV-community-questions-answer-codes","sub_path":"merge k sorted lists(using heap).py","file_name":"merge k sorted lists(using heap).py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16066898658","text":"interval_start = input()\ninterval_end = input()\nletter_to_miss = input()\n# print(ord(\"a\"))\ncounter = 0\n\nfor first_letter in range(ord(interval_start), ord(interval_end) + 1):\n if chr(first_letter) != letter_to_miss:\n for second_letter in range(ord(interval_start), ord(interval_end) + 1):\n if chr(second_letter) != letter_to_miss:\n for third_letter in range(ord(interval_start), ord(interval_end) + 1):\n if chr(third_letter) != letter_to_miss:\n print(f'{chr(first_letter)}{chr(second_letter)}{chr(third_letter)}', end = \" \")\n counter += 1\n\nprint(counter)","repo_name":"VelinIliev/python-basic-softuni","sub_path":"23-nested_loops-more-exercises/02-letters_combinations.py","file_name":"02-letters_combinations.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33960149367","text":"import logging\r\nimport numpy as np\r\nlogging.getLogger().setLevel(logging.INFO)\r\nfrom pyhaystack.util import filterbuilder as fb\r\n# %%\r\nimport pyhaystack\r\nimport datetime\r\nfrom datetime import timezone\r\nsession = pyhaystack.connect(implementation='skyspark',\r\n uri='http://skyspark.hanson-inc.com:8080',\r\n username='administrator',\r\n password='IownSkyspark',\r\n project='springfieldHanson',\r\n pint=True)\r\n\r\n#%%\r\nsession\r\nop = session.about()\r\nop.wait()\r\nnav = op.result\r\nprint(nav)\r\n\r\n#%%\r\nsite=session.site\r\nmy_equip = site['rtu1']\r\n# equip = site.equipments\r\n\r\n#%%\r\n# res = my_equip.find_points(fb.Field('navName') == fb.Scalar('rtu1_RACO2')).result\r\n# res.points\r\npoint = list(my_equip.find_entity(fb.Field('navName') == fb.Scalar('rtu1_RACO2')).result.values())[0]\r\nres = session.his_write(point.id, {site.tz.localize(datetime.datetime.now()): 0})\r\nprint(res.result)","repo_name":"mschrader15/hanson-bas","sub_path":"misc/scatchpad.py","file_name":"scatchpad.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74960371465","text":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the 
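# Note on the k-way merge shown earlier: collecting every node's value into one
# list costs O(N) extra memory and O(N log N) time; a common refinement keeps at
# most k entries in the heap by pushing (value, list_index, node) tuples, where
# list_index breaks ties so nodes themselves are never compared (a sketch, not
# the author's solution):
#
# heap = [(lst.val, i, lst) for i, lst in enumerate(lists) if lst]
# heapq.heapify(heap)
# head = cur = ListNode()
# while heap:
#     val, i, node = heapq.heappop(heap)
#     cur.next = node
#     cur = node
#     if node.next:
#         heapq.heappush(heap, (node.next.val, i, node.next))
# return head.next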
documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\n\nfrom datetime import date\nimport coolest\n\n\nproject = 'COOLEST'\ncopyright = f\"{date.today().year}, COOLEST developers \"\nauthor = coolest.__author__\nrelease = coolest.__version__\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n 'sphinx.ext.duration', # prints compilation durations\n 'sphinx.ext.autodoc', # documentation based on docstrings\n 'sphinx.ext.autosummary', # generates class descriptions based on code\n 'sphinx.ext.napoleon', # generates .rst pages based on package modules\n 'sphinx.ext.viewcode', # adds links to highlighted code\n 'myst_nb', # supports markdown .md files\n 'sphinx_design', # responsive design components\n 'autoapi.extension', # generates autoapi directory\n \"sphinx_math_dollar\", # allows to write LaTeX in .md files\n \"sphinxcontrib.bibtex\", # handles bibtext entries for citations\n # \"nbsphinx\", # for embedding jupyter notebooks as doc pages\n]\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# AutoAPI configuration\nautoapi_dirs = [\"../coolest\"]\nautoapi_type = \"python\"\nautoapi_add_toctree_entry = False\nautoapi_options = [\"show-module-summary\", \"undoc-members\"]\nautodoc_typehints = \"signature\"\n# autoapi_python_class_content = 'both' # includes both class and __init__ docstrings\n\n# Options for HTML output\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = 'sphinx_book_theme' # third-party theme\nhtml_static_path = ['_static']\n\n# Suffixes to support with myst_parser\nsource_suffix = {\n \".rst\": \"restructuredtext\", \n \".ipynb\": \"myst-nb\", \n \".md\": \"myst-nb\"\n}\n\nnb_custom_formats = {\n \".md\": [\"jupytext.reads\", {\"fmt\": \"mystnb\"}],\n}\nmyst_enable_extensions = [\"colon_fence\"]\n\n# do not re-compile notebooks when generating the html documentation\nnb_execution_mode = \"off\"\n\nautosummary_generate = True\nadd_module_names = False # prevent cluttering the doc with the full submodule path\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_title = \"\"\nhtml_logo = \"_static/coolest_logo.png\"\nhtml_css_files = [\"custom.css\"]\n\n# Skip files we do not want to be included in the documentation\ndef skip_util_classes(app, what, name, obj, skip, options):\n excluded_modules = [\n \"coolest.template.info\",\n \"coolest.template.lazy\",\n \"coolest.template.classes.regularization\",\n \"coolest.template.classes.regularization_list\",\n ]\n if what == \"module\" and name in excluded_modules:\n skip = True\n\n excluded_packages = [\n \"coolest.template.classes.regularization\",\n \"coolest.template.classes._old\",\n ]\n if what == \"package\" and name in excluded_packages:\n skip = True\n\n return skip\n\ndef setup(sphinx):\n sphinx.connect(\"autoapi-skip-member\", skip_util_classes)\n\n\nbibtex_bibfiles = [\"../joss/paper.bib\", \"refs.bib\"]\nbibtex_default_style = \"alpha\" # alpha, plain, unsrt, unsrtalpha\n","repo_name":"aymgal/COOLEST","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"33342778058","text":"import os\nimport struct\nfrom typing import List, Optional, Union\nfrom deeplake.core.compression import decompress_array, decompress_bytes\nfrom deeplake.core.sample import Sample # type: ignore\nfrom deeplake.core.serialize import (\n check_sample_shape,\n bytes_to_text,\n)\nfrom deeplake.core.tiling.sample_tiles import SampleTiles\nfrom deeplake.core.polygon import Polygons\nfrom deeplake.util.video import normalize_index\nfrom .base_chunk import BaseChunk, InputSample, catch_chunk_read_error\n\n\nclass SampleCompressedChunk(BaseChunk):\n def extend_if_has_space(self, incoming_samples: List[InputSample], update_tensor_meta: bool = True, ignore_errors: bool = False, **kwargs) -> float: # type: ignore\n self.prepare_for_write()\n num_samples: float = 0\n dtype = self.dtype if self.is_byte_compression else None\n compr = self.compression\n skipped: List[int] = []\n\n for i, incoming_sample in enumerate(incoming_samples):\n try:\n serialized_sample, shape = self.serialize_sample(incoming_sample, compr)\n if shape is not None:\n self.num_dims = self.num_dims or len(shape)\n check_sample_shape(shape, self.num_dims)\n except Exception:\n if ignore_errors:\n skipped.append(i)\n continue\n raise\n\n if isinstance(serialized_sample, SampleTiles):\n incoming_samples[i] = serialized_sample # type: ignore\n if self.is_empty:\n self.write_tile(serialized_sample)\n num_samples += 0.5\n break\n\n else:\n sample_nbytes = len(serialized_sample)\n if self.is_empty or self.can_fit_sample(sample_nbytes):\n self.data_bytes += serialized_sample # type: ignore\n\n self.register_in_meta_and_headers(\n sample_nbytes,\n shape,\n update_tensor_meta=update_tensor_meta,\n )\n num_samples += 1\n else:\n if serialized_sample:\n sample = Sample(\n buffer=serialized_sample, compression=compr, shape=shape, dtype=dtype # type: ignore\n )\n sample.htype = self.htype\n incoming_samples[i] = sample\n break\n\n for i in reversed(skipped):\n incoming_samples.pop(i)\n return num_samples\n\n @catch_chunk_read_error\n def read_sample( # type: ignore\n self,\n local_index: int,\n cast: bool = True,\n copy: bool = False,\n sub_index: Optional[Union[int, slice]] = None,\n stream: bool = False,\n decompress: bool = True,\n is_tile: bool = False,\n to_pil: 
bool = False,\n    ):\n        self.check_empty_before_read()\n        partial_sample_tile = self._get_partial_sample_tile()\n        if partial_sample_tile is not None:\n            return partial_sample_tile\n        buffer = self.memoryview_data\n        bps = self.byte_positions_encoder\n        bps_empty = bps.is_empty()\n        if not bps_empty:\n            sb, eb = bps[local_index]\n            if stream and self.is_video_compression:\n                header_size = struct.unpack(\"<i\", buffer[eb : eb + 4])[0]\n                buffer = buffer[sb : eb + 4 + header_size]\n            else:\n                buffer = buffer[sb:eb]\n\n        shape = self.shapes_encoder[local_index]\n\n        if not decompress:\n            return bytes(buffer) if copy else buffer\n\n        if self.is_text_like:\n            buffer = decompress_bytes(buffer, self.compression)\n            return bytes_to_text(bytes(buffer), self.htype)\n\n        squeeze = isinstance(sub_index, int)\n        nframes = shape[0]\n        start, stop, step, reverse = normalize_index(sub_index, nframes)\n        if start > nframes:\n            raise IndexError(\"Start index out of bounds.\")\n\n        if self.tensor_meta.htype == \"polygon\":\n            buffer = decompress_bytes(buffer, self.compression)\n            return Polygons.frombuffer(\n                bytes(buffer),\n                dtype=self.tensor_meta.dtype,\n                ndim=shape[-1],\n            )\n\n        sample = decompress_array(\n            buffer,\n            shape,\n            self.dtype,\n            self.compression,\n            start_idx=start,\n            end_idx=stop,\n            step=step,\n            reverse=reverse,\n            to_pil=to_pil,\n        )\n        if to_pil:\n            return sample\n\n        if squeeze:\n            sample = sample.squeeze(0)\n\n        if cast and sample.dtype != self.dtype:\n            sample = sample.astype(self.dtype)\n        elif copy and not sample.flags[\"WRITEABLE\"]:\n            sample = sample.copy()\n        return sample\n\n    def update_sample(self, local_index: int, sample: InputSample):\n        self.prepare_for_write()\n        serialized_sample, shape = self.serialize_sample(\n            sample, self.compression, break_into_tiles=False\n        )\n\n        self.check_shape_for_update(shape)\n        old_data = self.data_bytes\n        self.data_bytes = self.create_updated_data(\n            local_index, old_data, serialized_sample\n        )\n\n        # update encoders and meta\n        new_nb = (\n            None if self.byte_positions_encoder.is_empty() else len(serialized_sample)\n        )\n        self.update_in_meta_and_headers(local_index, new_nb, shape)\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/core/chunk/sample_compressed_chunk.py","file_name":"sample_compressed_chunk.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"} +{"seq_id":"36111740318","text":"# Sort data according to a given criterion\n\n# # 6-1.py selection sort source code\n# array = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]\n\n# for i in range(len(array)):\n#     min_index = i # index of the smallest element\n#     for j in range(i+1, len(array)):\n#         if array[min_index] > array[j]:\n#             min_index = j\n#     array[i], array[min_index] = array[min_index], array[i] # swap\n\n# print(array)\n\n# # 6-2.py Python swap source code\n# # swap the elements at index 0 and index 1\n# array = [3, 5]\n# array[0], array[1] = array[1], array[0]\n\n# print(array)\n\n# 6-3.py insertion sort source code\narray = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]\n\nfor i in range(1, len(array)):\n    for j in range(i, 0, -1): # iterate from index i down to 1\n        if array[j] < array[j-1]: # shift one position to the left\n            array[j], array[j-1] = array[j-1], array[j]\n        else: # stop once an element smaller than array[j] is found\n            break\n\nprint(array)\n","repo_name":"Jung0Jin/Algorithm_study","sub_path":"DongBinBook/6.1.py","file_name":"6.1.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17982843164","text":"import os,sys\nimport json\nfrom flask import Flask, render_template, flash, redirect, url_for, session, request, logging, jsonify\nfrom werkzeug import secure_filename\nfrom flask_mysqldb import MySQL\nfrom wtforms import Form, StringField, TextAreaField, PasswordField, validators, DateField, SelectField, SelectMultipleField, widgets\nfrom passlib.hash import sha256_crypt\nfrom functools import wraps\nfrom flask_jsglue import JSGlue\nfrom flask import send_from_directory\nfrom collections import defaultdict\n\n\n\n#GLOBALS\nUPLOAD_FOLDER = 
'/home/wjeo001/py/static/quick-quiz-master/img'\nUPLOAD_FOLDER_QUIZ = '/home/wjeo001/py/static/quick-quiz-master/quiz_lib'\nALLOWED_EXTENSIONS = set(['json', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n\napp = Flask(__name__)\n\n# @app.template_filter()\n# def datetimefilter(value, format='%d/%m/%Y'):\n#     \"\"\"convert a datetime to a different format.\"\"\"\n#     return value.strftime(format)\n\n# app.jinja_env.filters['datetimefilter'] = datetimefilter\n\n# app.config[\"DEBUG\"] = True\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nAPP_ROOT=os.path.dirname(os.path.abspath(__file__))\n\n# config MySql\n# app.config['MYSQL_HOST']='wjeo001.mysql.pythonanywhere-services.com'\n# app.config['MYSQL_USER']='wjeo001'\n# app.config['MYSQL_PASSWORD']='mar_Vjdls!2'\n# app.config['MYSQL_DB']='wjeo001$gen_001'\n# app.config['MYSQL_CURSORCLASS']='DictCursor'\napp.config['MYSQL_HOST']='localhost'\napp.config['MYSQL_USER']='root'\napp.config['MYSQL_PASSWORD']='mar_Vjdls!2'\napp.config['MYSQL_DB']='gen_001'\napp.config['MYSQL_CURSORCLASS']='DictCursor'\n\n# init MYSQL\nmysql=MySQL(app)\n\n#init JSGlue\njsglue = JSGlue(app)\n\n\n#Articles = Articles();\n\n@app.route('/text')\ndef text():\n\treturn render_template('text.html')\n\n@app.route('/')\n#index\ndef index():\n\treturn home()\n\t#return render_template('home.html')\n#home\n@app.route('/home')\ndef home():\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get articles\n\ts=\"select * from articles order by id desc limit 5\"\n\tresult=cur.execute(s)\n\tarticles=cur.fetchall()\n\n\n\n\t#close connection\n\tcur.close()\n\n\treturn render_template('home.html', articles=articles)\n\n\n#about\n@app.route('/about')\ndef about():\n\treturn render_template('about.html')\n\n#Articles\n@app.route('/articles')\ndef articles():\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get articles\n\tresult=cur.execute(\"select * from articles\")\n\n\tarticles=cur.fetchall()\n\n\t#close connection\n\tcur.close()\n\n\tif result > 0:\n\t\treturn render_template('articles.html', articles=articles)\n\telse:\n\t\tmsg='No articles found'\n\t\treturn render_template('articles.html', msg=msg)\n\n#single article\n@app.route('/article/<id>/')\ndef article(id):\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get article\n\tresult=cur.execute(\"select * from articles where id = %s\", [id])\n\n\tarticle=cur.fetchone()\n\treturn render_template('article.html', article=article)\n\n#Register form class\nclass RegisterForm(Form):\n\tfirstname = StringField('FirstName', [validators.Length(min=1, max=50)])\n\tlastname = StringField('LastName', [validators.Length(min=1, max=50)])\n\tusername = StringField('Username',[validators.Length(min=4, max=25)])\n\temail = StringField('Email',[validators.Length(min=6,max=50)])\n\tpassword = PasswordField('Password', [\n\t\t\tvalidators.DataRequired(),\n\t\t\tvalidators.EqualTo('confirm', message='Passwords do not match')\n\t\t])\n\tconfirm = PasswordField('Confirm Password')\n\n#User Register\n@app.route('/register', methods=['GET','POST'])\ndef register():\n\tform = RegisterForm(request.form)\n\tif request.method == 'POST' and form.validate():\n\t\tfirstname = form.firstname.data\n\t\tlastname = form.lastname.data\n\t\temail = form.email.data\n\t\tusername = form.username.data\n\t\tpassword = sha256_crypt.encrypt(str(form.password.data))\n\n\t\t#Create cursor\n\t\tcur = mysql.connection.cursor()\n\n\t\t#execute\n\t\ts=\"select username from users where 
username='\"+username+\"'\"\n\t\tresult=cur.execute(s)\n\t\tapp.logger.info(result)\n\t\tif result>0:\n\t\t\tapp.logger.info('duplicate username')\n\t\t\tflash(\"Username \"+ username + \" already exists. Please try another username\", 'danger')\n\t\t\tcur.close()\n\t\t\treturn render_template('Register.html',form=form)\n\t\telse:\n\t\t\ts=\"insert into users(firstname, lastname, email, username, password,power_level) values ('\"+firstname+\"','\"+lastname+\"','\"+email+\"','\"+username+\"','\"+password+\"','student')\"\n\t\t\tcur.execute(s)\n\t\t\ts=\"insert into people(firstname, lastname, email, username, people_type) values ('\"+firstname+\"','\"+lastname+\"','\"+email+\"','\"+username+\"','student')\"\n\t\t\tcur.execute(s)\n\n\t\t# Commit to DB\n\t\tmysql.connection.commit()\n\n\t\t#close connection\n\t\tcur.close()\n\n\t\tflash('You are now registered and can log in', 'success')\n\n\t\treturn redirect(url_for('login'))\n\treturn render_template('Register.html', form = form)\n\n#user login\n@app.route('/login', methods=['GET','POST'])\ndef login():\n\tif request.method == 'POST':\n\t\t#get form fields\n\t\tusername=request.form['username']\n\t\tpassword_candidate=request.form['password']\n\n\t\t#create cursor\n\t\tcur=mysql.connection.cursor()\n\n\t\t#get user by username\n\t\ts=(\"select u.username,u.power_level,u.firstname,u.lastname,p.id,u.password \"+\n\t\t\t\"from users u \" +\n\t\t\t\"inner join people p on p.username=u.username where u.username='\"+username+\"'\")\n\t\t# result=cur.execute(\"select * from users where username= %s\", [username])\n\t\tresult=cur.execute(s)\n\n\n\n\t\tif result > 0:\n\t\t\t# Get stored hash\n\t\t\tdata = cur.fetchone()\n\t\t\tpassword=data['password']\n\n\t\t\t#compare passwords\n\t\t\tif sha256_crypt.verify(password_candidate,password):\n\t\t\t\t#app.logger.info('PASSWORD MATCHED')\n\t\t\t\tsession['logged_in'] = True\n\t\t\t\tsession['username']=username\n\t\t\t\tsession['rights']= data['power_level']\n\t\t\t\tsession['fullname']= data['firstname'] + ' ' + data['lastname']\n\t\t\t\tsession['id']=data['id']\n\t\t\t\tflash('You are now logged in', 'success')\n\t\t\t\treturn redirect(url_for('dashboard'))\n\t\t\telse:\n\t\t\t\terror='Invalid login'\n\t\t\t\treturn render_template('login.html', error=error)\n\t\t\t#close connection\n\t\t\tcur.close()\n\t\telse:\n\t\t\terror='Username not found'\n\t\t\treturn render_template('login.html', error=error)\n\n\n\n\treturn render_template('login.html')\n\n#check if user logged in\ndef is_logged_in(f):\n\t@wraps(f)\n\tdef wrap(*args,**kwargs):\n\t\tif 'logged_in' in session:\n\t\t\treturn f(*args, **kwargs)\n\t\telse:\n\t\t\tflash('Unauthorised, Please login', 'danger')\n\t\t\treturn redirect(url_for('login'))\n\treturn wrap\n\n#check if the user has admin rights\ndef is_admin(f):\n\t@wraps(f)\n\tdef wrap(*args,**kwargs):\n\t\ta=session['rights']\n\t\tif a=='admin':\n\t\t\treturn f(*args,**kwargs)\n\t\telse:\n\t\t\tflash('You do not have admin rights', 'danger')\n\t\t\treturn redirect(url_for('home'))\n\treturn wrap\n\n#Logout\n@app.route('/logout')\n@is_logged_in\ndef logout():\n\tsession.clear()\n\tflash('You are now logged out', 'success')\n\treturn redirect(url_for('login'))\n\n#password reset\n@app.route('/resetpw/', methods=['GET','POST'])\n@is_admin\ndef resetpw(id):\n\tcur=mysql.connection.cursor()\n\tnew_pw_string=\"pw123456\"\n\tnewpw=sha256_crypt.encrypt(new_pw_string)\n\ts=\"update users set password='{}' where 
username='{}'\".format(newpw,id)\n\tapp.logger.info(s)\n\tresult=cur.execute(s)\n\tmysql.connection.commit()\n\tcur.close()\n\tflash(\"Password reset done\",\"success\")\n\treturn redirect(url_for('users'))\n\n\n#user profile\nclass ProfileForm(Form):\n\tfirstname=StringField('First Name')\n\tlastname=StringField('Last Name')\n\temail=StringField('Email')\n\tmob=StringField('Mobile Phone')\n\tdob=DateField('Date of Birth',format='%Y-%m-%d')\n\tphone=StringField('Phone Number')\n\tadr_street=StringField('Street Address')\n\tadr_suburb=StringField('Suburb')\n\tadr_state=SelectField('State',choices=[('NSW','NSW'),('QLD','QLD'),('VIC','VIC'),('ACT','ACT'),('WA','WA'),('SA','SA'),('NT','NT')])\n\tadr_postcode=StringField('Postcode')\n\tadr_type=SelectField('Address Type',choices=[('HOME','HOME'),('POST','POST')])\n\t# people_type=SelectField('User type',choices=[('student','student'),('staff','staff'),('admin','admin')]) #admin only\n\tusername=StringField('User Name') #admin only\n\n@app.route('/profile')\n@is_logged_in\ndef profile():\n\tcur = mysql.connection.cursor()\n\ts=\"select p.*,a.street_address,a.suburb,a.state,a.postcode,a.addr_type from people p left join address a on a.people_id=p.id where p.username='\"+session['username']+\"'\"\n\tresult=cur.execute(s)\n\tuser=cur.fetchone()\n\tcur.close()\n\treturn render_template('profile_view.html',user=user)\n\n@app.route('/profile_edit/<username>',methods=['GET','POST'])\n@is_logged_in\ndef profile_edit(username):\n\tform=ProfileForm(request.form)\n\tcur=mysql.connection.cursor()\n\t#s=\"select p.*,a.street_address,a.suburb,a.state,a.postcode,a.addr_type from people p left join address a on a.people_id=people_id where p.username='\"+session['username']+\"'\"\n\ts=\"select p.*,a.street_address,a.suburb,a.state,a.postcode,a.addr_type from people p left join address a on a.people_id=p.id where p.username='\"+username+\"'\"\n\tresult=cur.execute(s)\n\tuser=cur.fetchone()\n\tcur.close()\n\n\tform.firstname.data=user['firstname']\n\tform.lastname.data=user['lastname']\n\tform.email.data=user['email']\n\tform.mob.data=user['mob']\n\tform.dob.data=user['date_of_birth']\n\tform.phone.data=user['phone']\n\tform.adr_street.data=user['street_address']\n\tform.adr_suburb.data=user['suburb']\n\tform.adr_state.data=user['state']\n\tform.adr_postcode.data=user['postcode']\n\tform.adr_type.data=user['addr_type']\n\t# form.people_type.data=user['people_type']\n\n\tif request.method=='POST':\n\t\tfirstname=request.form['firstname']\n\t\tlastname=request.form['lastname']\n\t\temail=request.form['email']\n\t\tmob=request.form['mob']\n\t\tdob=request.form['dob']\n\t\tphone=request.form['phone']\n\t\tadr_street=request.form['adr_street']\n\t\tadr_suburb=request.form['adr_suburb']\n\t\tadr_state=request.form['adr_state']\n\t\tadr_postcode=request.form['adr_postcode']\n\t\tadr_type=request.form['adr_type']\n\t\t# people_type=request.form['people_type']\n\n\t\tcur=mysql.connection.cursor()\n\t\t#s=\"update people set firstname='{}', lastname='{}', email='{}', mob='{}', date_of_birth='{}', phone='{}', people_type='{}' where id={}\".format(firstname,lastname,email,mob,dob,phone,people_type,session['id'])\n\t\ts=\"update people set firstname='{}', lastname='{}', email='{}', mob='{}', date_of_birth='{}', phone='{}', people_type='student' where username='{}'\".format(firstname,lastname,email,mob,dob,phone,username)\n\t\tapp.logger.info(s)\n\t\t#ss=(s+\" where id={}\").format(session['id'])\n\t\tresult=cur.execute(s)\n\n\t\t# if 'student' not in request.form['people_type']:\n\t\t# \ts=\"update users set 
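# Note: the .format()- and concatenation-built statements above interpolate
# user input directly into SQL; MySQLdb cursors also accept bound parameters,
# which a hardened version of these updates could use (a sketch, not the
# app's current behaviour):
#
# cur.execute(
#     "update people set firstname=%s, lastname=%s, email=%s where username=%s",
#     (firstname, lastname, email, username),
# )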
power_level='\"+people_type+\"' where username='\"+username+\"'\"\n\t\t# \tapp.logger.info(s)\n\t\t# \tresult=cur.execute(s)\n\n\t\t#ss=\"update address set street_address='{}', addr_type='{}', suburb='{}', state='{}', postcode='{}' where people_id={}\".format(adr_street,adr_type,adr_suburb,adr_state,adr_postcode,session['id'])\n\t\tss=\"update address set street_address='{}', addr_type='{}', suburb='{}', state='{}', postcode='{}' where people_id={}\".format(adr_street,adr_type,adr_suburb,adr_state,adr_postcode,user['id'])\n\t\tapp.logger.info(ss)\n\t\tresult=cur.execute(ss)\n\t\tif result<1:\n\t\t\t# ss=\"insert into address(street_address,addr_type,suburb,state,postcode,people_id) values ('{}','{}','{}','{}','{}',{})\".format(adr_street,adr_type,adr_suburb,adr_state,adr_postcode,session['id'])\n\t\t\tss=\"insert into address(street_address,addr_type,suburb,state,postcode,people_id) values ('{}','{}','{}','{}','{}',{})\".format(adr_street,adr_type,adr_suburb,adr_state,adr_postcode,user['id'])\n\t\t\tapp.logger.info(ss)\n\t\t\tresult=cur.execute(ss)\n\n\t\tmysql.connection.commit()\n\n\t\tcur.close()\n\t\tflash('Profile updated','success')\n\t\tif session['id']==user['id']:\n\t\t\treturn redirect(url_for('profile'))\n\t\telse:\n\t\t\treturn redirect(url_for('users'))\n\n\treturn render_template('profile_edit.html',form=form)\n\n@app.route('/data/dashboard/')\ndef charttest(idx):\n\tcur=mysql.connection.cursor()\n\ts=\"select c.id, c.course_name, c.course_desc, c.start_date, c.end_date, count(pu.unit_id) as unitcount from people p inner join people_units pu on pu.people_id=p.id inner join courses c on c.id=pu.course_id inner join users on users.username=p.username where pu.status='Active' and users.username='\"+session['username']+\"' group by c.id, c.course_name, c.course_desc, c.start_date, c.end_date\"\n\tapp.logger.info([idx])\n\tif idx=='2':\n\t\ts=\"select sbj.name, count(un.id) as unitcount from units un inner join people_units pu on pu.unit_id=un.id \" \\\n\t + \"inner join people p on p.id=pu.people_id inner join users u on u.username=p.username \" \\\n\t\t + \"left join ref_subjects sbj on sbj.id=un.subject where pu.status='Active' and u.username='\" + session['username'] + \"' group by sbj.name \"\n\n #This not done yet\n # \telif idx=='3':\n # \t s=\"select qs_name\"\n\n\n\tresult=cur.execute(s)\n\tdata=cur.fetchall()\n\tjsn=jsonify(data)\n\tcur.close()\n\treturn jsn\n\n#Dashboard\n@app.route('/dashboard')\n@is_logged_in\ndef dashboard():\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\t#get Enrolled Courses\n\ts=\"select c.id, c.course_name, c.course_desc, c.start_date, c.end_date, count(pu.unit_id) as unitcount from people p inner join people_units pu on pu.people_id=p.id inner join courses c on c.id=pu.course_id inner join users on users.username=p.username where pu.status='Active' and users.username='\"+session['username']+\"' group by c.id, c.course_name, c.course_desc, c.start_date, c.end_date\"\n\tresult=cur.execute(s)\n\tcourses=cur.fetchall()\n\tunits=0\n\tmsg=\"\"\n\tif result < 1:\n\t \tmsg=\"You don't have any enrolled courses yet\"\n\telse:\n\t\t#get Units of the courses for the user\n\t\ts=\"select pu.unit_id,pu.course_id, u.subject, u.unit_name, u.unit_desc, u.start_date,u.end_date,u.semester from people_units pu inner join units u on u.id=pu.unit_id inner join people p on p.id=pu.people_id inner join users on users.username=p.username where users.username='\" + session['username']+\"'\"\n\t\tresult=cur.execute(s)\n\t\tunits=cur.fetchall()\n\ts=\"select 
u.unit_name,qs.qs_name,count(pql.qr_id) as attempts from people_unit_quiz_link pql inner join quiz_sets qs on qs.qs_id=pql.qs_id inner join units u on u.id=qs.unit_id where exists (select 1 from people_units pu inner join people p on pu.people_id=p.id where pu_id=pql.pu_id and p.id={}) group by u.unit_name,qs.qs_name\".format(session['id'])\n\tresult=cur.execute(s)\n\tprogress=cur.fetchall()\n\tcur.close()\n\treturn render_template('dashboard.html', msg=msg,courses=courses,units=units, jscourse=jsonify(courses), progress=progress)\n\n#Article form class\nclass ArticleForm(Form):\n\ttitle = StringField('Title', [validators.Length(min=1, max=200)])\n\tbody = TextAreaField('Body',[validators.Length(min=30)])\n\n#Add Article\n@app.route('/add_article',methods=['GET','POST'])\n@is_logged_in\ndef add_article():\n\tform = ArticleForm(request.form)\n\tif request.method == 'POST' and form.validate():\n\t\ttitle=form.title.data\n\t\tbody=form.body.data\n\n\t\t#Create cursor\n\t\tcur=mysql.connection.cursor()\n\n\t\t#Execute\n\t\ts=\"select concat(firstname,' ',LastName) as auth from users where username='\" + session['username'] + \"'\"\n\t\tresult=cur.execute(s)\n\t\tauthorname=cur.fetchone()\n\n\t\t#cur.execute(\"insert into articles (title,body,author) values (%s,%s,%s)\", (title,body,session['username']))\n\t\tcur.execute(\"insert into articles (title,body,author) values (%s,%s,%s)\", (title,body,authorname['auth']))\n\n\t\t#commit\n\t\tmysql.connection.commit()\n\n\t\t#close\n\t\tcur.close()\n\n\t\tflash('Article Created','success')\n\n\t\treturn redirect(url_for('dashboard'))\n\treturn render_template('add_article.html',form=form)\n\n#Edit Article\n@app.route('/edit_article/',methods=['GET','POST'])\n@is_logged_in\ndef edit_article(id):\n\t# Create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get article by id\n\tresult=cur.execute(\"select * from articles where id=%s\", [id])\n\tarticle=cur.fetchone()\n\n\t#Get form\n\tform = ArticleForm(request.form)\n\n\t#populate article form fields\n\tform.title.data=article['title']\n\tform.body.data=article['body']\n\n\tif request.method == 'POST' and form.validate():\n\t\ttitle=request.form['title']\n\t\tbody=request.form['body']\n\n\t\t#Create cursor\n\t\tcur=mysql.connection.cursor()\n\n\t\t#Execute\n\t\ts=\"select concat(firstname,' ',LastName) as auth from users where username='\" + session['username'] + \"'\"\n\t\tresult=cur.execute(s)\n\t\tauthorname=cur.fetchone()\n\n\t\t#cur.execute(\"insert into articles (title,body,author) values (%s,%s,%s)\", (title,body,session['username']))\n\t\tcur.execute(\"update articles set title=%s, body=%s, author=%s where id= %s\", (title,body, authorname['auth'], id))\n\n\t\t#commit\n\t\tmysql.connection.commit()\n\n\t\t#close\n\t\tcur.close()\n\n\t\tflash('Article Updated','success')\n\t\treturn redirect(url_for('articles'))\n\t\t# return redirect(url_for('dashboard'))\n\treturn render_template('edit_article.html',form=form)\n\n#Delete Article\n@app.route('/delete_article/', methods=['POST'])\n@is_logged_in\ndef delete_article(id):\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#execute\n\tcur.execute(\"delete from articles where id=%s\", [id])\n\n\t#commit\n\tmysql.connection.commit()\n\n\t#close\n\tcur.close()\n\n\tflash('Article deleted','success')\n\n\treturn redirect(url_for('dashboard'))\n\n\n#Quiz\n#+Quiz form class\nclass QuizForm(Form):\n\t# Qnum=SelectField('Question Number',choices=[(y,str(y)) for y in range(1,100)],coerce=int)\n\tQuestion = 
TextAreaField('Question')\n\tsubject=SelectField('Subject',choices=[],coerce=int)\n\tcategories=SelectField('Category',choices=[],coerce=int)\n\tdescription=SelectField('Description',choices=[],coerce=int)\n\top1=TextAreaField('Choice a')\n\top2=TextAreaField('Choice b')\n\top3=TextAreaField('Choice c')\n\top4=TextAreaField('Choice d')\n\top_short=StringField('What is the correct answer?')\n#delete staging quiz\n@app.route('/delete_staging_quiz/',methods=['GET','POST'])\n@is_logged_in\n@is_admin\ndef quiz_delete_quiz(id):\n\tcur=mysql.connection.cursor()\n\tcur.execute(\"delete from quiz_staging_questions where qq_number=%s\",(id))\n\tcur.execute(\"delete from quiz_staging_answers where qq_number=%s\",(id))\n\tmysql.connection.commit()\n\t# cur.execute(\"DROP TRIGGER tr\")\n\tcur.execute(\"CREATE TRIGGER tr AFTER UPDATE ON quiz_staging_questions FOR EACH ROW SET @id:=@id+1\")\n\tcur.execute(\"ALTER TABLE quiz_staging_questions ADD COLUMN new_id INT NOT NULL AFTER qq_number\")\n\tcur.execute(\"SET @id=1\")\n\tcur.execute(\"UPDATE quiz_staging_questions SET new_id=@id\")\n\tcur.execute(\"DROP TRIGGER tr\")\n\tcur.execute(\"update quiz_staging_answers a inner join quiz_staging_questions b on b.qq_number=a.qq_number set a.qq_number=b.new_id\")\n\tcur.execute(\"UPDATE quiz_staging_questions SET qq_number=new_id\")\n\tcur.execute(\"ALTER TABLE quiz_staging_questions DROP new_id\")\n\tmysql.connection.commit()\n\tcur.close()\n\treturn redirect(url_for('quiz_Create_new_quiz'))\n\n#+Edit existing quiz set\n@app.route('/quiz_modify/',methods=['GET','POST'])\n@is_logged_in\n@is_admin\ndef quiz_modify_quiz(id):\n\tform = QuizForm(request.form)\n\tcur=mysql.connection.cursor()\n\tapp.logger.info('---quiz_set id---')\n\tapp.logger.info(id)\n\n\tcur.execute(\"select * from quiz_sets where qs_id=%s\",[id])\n\tqs_result=cur.fetchone()\n\tcur.execute(\"select * from quiz_questions_commit where qs_id=%s\",[id])\n\tqlist=cur.fetchall()\n\n\t#retreive category info\n\tcur.execute(\"select id,name from ref_subjects order by name\")\n\tresult=cur.fetchall()\n\tform.subject.choices=[(a['id'],a['name']) for a in result]\n\tcur.execute(\"select id,name from ref_description order by name\")\n\tresult=cur.fetchall()\n\tform.description.choices=[(a['id'],str(a['name'])) for a in result]\n\tcur.execute(\"select id,name from ref_categories order by name\")\n\tresult=cur.fetchall()\n\tform.categories.choices=[(a['id'],a['name']) for a in result]\n\n\n\ttitle=qs_result['qs_name']\n\tsession['title']=title\n\tunit_id=qs_result['unit_id']\n\tsession['unit_id']=unit_id\n\n\tapp.logger.info('--unit id---')\n\tapp.logger.info(unit_id)\n\tparam=request.args.get('edit',default=\"x\")\n\tcombo=\"\"\n\tcorrect_answer=\"\"\n\n\tif request.method=='GET' and param!='x': #edit view of a question\n\t\tresult=cur.execute(\"select * from quiz_questions_commit where qq_number=%s and qs_id=%s\",(param,[id]))\n\t\tq=cur.fetchone()\n\t\tform.Question.data=q['qq_body']\n\t\tform.subject.process_data(q['subject_id'])\n\t\tform.categories.process_data(q['category_id'])\n\t\tform.description.process_data(q['description_id'])\n\t\tcorrect_answer=q['qq_correct_answer']\n\t\tresult=cur.execute(\"select * from quiz_answers_commit where qq_number=%s and qs_id=%s\",(param,[id]))\n\n\t\tif correct_answer=='s':\n\t\t\tcombo=\"short\"\n\t\t\tanswers=cur.fetchone()\n\t\t\tform.op_short.data=answers['qa_answer']\n\t\telse:\n\t\t\tcombo=\"multi\"\n\t\t\tc=0\n\t\t\tanswers=cur.fetchall()\n\t\t\tfor row in answers:\n\t\t\t\tif 
c==0:\n\t\t\t\t\tform.op1.data=row['qa_answer']\n\t\t\t\telif c==1:\n\t\t\t\t\tform.op2.data=row['qa_answer']\n\t\t\t\telif c==2:\n\t\t\t\t\tform.op3.data=row['qa_answer']\n\t\t\t\telif c==3:\n\t\t\t\t\tform.op4.data=row['qa_answer']\n\t\t\t\tc=c+1\n\t\treturn render_template('quiz_modify.html',form=form,qlist=qlist,correct_answer=correct_answer,combo=combo,qid=param,title=title,uid=unit_id)\n\n\tif request.method=='POST':\n\t\tquestion_body=request.form['Question']\n\t\tcur=mysql.connection.cursor()\n\t\tresult=cur.execute(\"select count(*) as m from quiz_questions_commit where qs_id=%s\",[id])\n\t\tdata=cur.fetchone()\n\t\tqn=data['m']+1\n\t\tapp.logger.info(\"2aa\")\n\t\tans1=request.form['op1']\n\t\tans2=request.form['op2']\n\t\tans3=request.form['op3']\n\t\tans4=request.form['op4']\n\t\tapp.logger.info(\"3aa\")\n\t\tansShort=request.form['op_short']\n\t\tapp.logger.info(\"4aa\")\n\t\tchklist=request.form.getlist('chk_op')\n\t\tsubject_id=request.form['subject']\n\t\tcategory_id=request.form['categories']\n\t\tdescription_id=request.form['description']\n\n\t\tc=0\n\t\taa=\"abcd\"\n\t\tanswer=\"\"\n\t\tif len(ansShort)>0:\n\t\t\tanswer=\"s\"\n\t\telse:\n\t\t\tfor value in chklist:\n\t\t\t\tapp.logger.info(value)\n\t\t\t\tanswer=aa[int(value)]\n\t\tapp.logger.info(\"5aa\")\n\t\tif request.form['btnrq']==\"Update\": #if this was for updating question content\n\t\t\tqid=request.form['post_qid']\n\t\t\tapp.logger.info(\"qid=\"+qid)\n\n\t\t\tcur.execute(\"update quiz_questions_commit set qq_body=%s,qq_correct_answer=%s, subject_id=%s, category_id=%s, description_id=%s where qq_number=%s and qs_id=%s\",(question_body,answer,subject_id,category_id,description_id,qid,[id]))\n\t\t\tif answer==\"s\":\n\t\t\t\tcur.execute(\"update quiz_answers_commit set qa_answer=%s where qq_number=%s and qa_answer_number=\\\"s\\\" and qs_id=%s\",(ansShort,qid,[id]))\n\t\t\telse:\n\t\t\t\tcur.execute(\"update quiz_answers_commit set qa_answer=%s where qa_answer_number=%s and qq_number=%s and qs_id=%s\",(ans1,'a',qid,[id]))\n\t\t\t\tcur.execute(\"update quiz_answers_commit set qa_answer=%s where qa_answer_number=%s and qq_number=%s and qs_id=%s\",(ans2,'b',qid,[id]))\n\t\t\t\tcur.execute(\"update quiz_answers_commit set qa_answer=%s where qa_answer_number=%s and qq_number=%s and qs_id=%s\",(ans3,'c',qid,[id]))\n\t\t\t\tcur.execute(\"update quiz_answers_commit set qa_answer=%s where qa_answer_number=%s and qq_number=%s and qs_id=%s\",(ans4,'d',qid,[id]))\n\t\t\tmysql.connection.commit()\n\t\telif request.form['btnrq']==\"Add a quiz\": #if this was for adding a new question\n\t\t\tcur.execute(\"insert into quiz_questions_commit(qq_body,qq_number,qq_correct_answer,qs_id,subject_id,category_id,description_id) values (%s,%s,%s,%s,%s,%s,%s)\",(question_body,qn,answer,[id],subject_id,category_id,description_id))\n\t\t\tif answer==\"s\":\n\t\t\t\tcur.execute(\"insert into quiz_answers_commit(qa_answer,qq_number,qa_answer_number,qs_id) values (%s,%s,'s',%s)\",(ansShort,qn,[id]))\n\t\t\telse:\n\t\t\t\tcur.execute(\"insert into quiz_answers_commit(qa_answer,qq_number,qa_answer_number,qs_id) values (%s,%s,'a',%s)\",(ans1,qn,[id]))\n\t\t\t\tcur.execute(\"insert into quiz_answers_commit(qa_answer,qq_number,qa_answer_number,qs_id) values (%s,%s,'b',%s)\",(ans2,qn,[id]))\n\t\t\t\tcur.execute(\"insert into quiz_answers_commit(qa_answer,qq_number,qa_answer_number,qs_id) values (%s,%s,'c',%s)\",(ans3,qn,[id]))\n\t\t\t\tcur.execute(\"insert into quiz_answers_commit(qa_answer,qq_number,qa_answer_number,qs_id) values 
(%s,%s,'d',%s)\",(ans4,qn,[id]))\n\t\t\tmysql.connection.commit()\n\t\telse: #finished creating quizes\n\t\t\tcur.execute(\"select count(*) as m from quiz_questions_commit where qs_id=%s\",[id])\n\t\t\tc=cur.fetchone()\n\t\t\tcur.execute(\"update quiz_sets set number_of_questions=%s where qs_id=%s\",(c['m'],id))\n\t\t\tmysql.connection.commit()\n\t\t\tcur.close()\n\t\t\treturn redirect(url_for('quiz_manager'))\n\t\tcur.close()\n\t\treturn redirect(url_for('quiz_modify_quiz',id=id))\n\treturn render_template('quiz_modify.html',form=form,qlist=qlist,combo=combo,correct_answer=correct_answer)\n\n\n\n#+Create and edit staging quiz set\n@app.route('/quiz_createquiz',methods=['GET','POST'])\n@is_logged_in\n@is_admin\ndef quiz_Create_new_quiz():\n\tcur=mysql.connection.cursor()\n\tresult=cur.execute(\"select * from quiz_staging_questions\")\n\tqlist=cur.fetchall()\n\tform = QuizForm(request.form)\n\tcur.execute(\"select id,name from ref_subjects order by name\")\n\tresult=cur.fetchall()\n\tform.subject.choices=[(a['id'],a['name']) for a in result]\n\tcur.execute(\"select id,name from ref_description order by name\")\n\tresult=cur.fetchall()\n\tform.description.choices=[(a['id'],str(a['name'])) for a in result]\n\tcur.execute(\"select id,name from ref_categories order by name\")\n\tresult=cur.fetchall()\n\tform.categories.choices=[(a['id'],a['name']) for a in result]\n\n\ttitle=session['title']\n\tunit_id=session['unit_id']\n\tparam=request.args.get('edit',default=\"x\")\n\tcombo=\"\"\n\tcorrect_answer=\"\"\n\n\tif request.method=='GET' and param!='x': #edit view of a question\n\t\tresult=cur.execute(\"select qq_number, qq_body,qq_correct_answer,subject_id,category_id,description_id from quiz_staging_questions where qq_number=%s\",(param))\n\t\tq=cur.fetchone()\n\t\tform.Question.data=q['qq_body']\n\t\tform.subject.process_data(q['subject_id'])\n\t\tform.categories.process_data(q['category_id'])\n\t\tform.description.process_data(q['description_id'])\n\t\tcorrect_answer=q['qq_correct_answer']\n\t\tresult=cur.execute(\"select * from quiz_staging_answers where qq_number=%s\",(param))\n\t\tif correct_answer=='s':\n\t\t\tcombo=\"short\"\n\t\t\tanswers=cur.fetchone()\n\t\t\tform.op_short.data=answers['qa_answer']\n\t\telse:\n\t\t\tcombo=\"multi\"\n\t\t\tc=0\n\t\t\tanswers=cur.fetchall()\n\t\t\tfor row in answers:\n\t\t\t\tif c==0:\n\t\t\t\t\tform.op1.data=row['qa_answer']\n\t\t\t\telif c==1:\n\t\t\t\t\tform.op2.data=row['qa_answer']\n\t\t\t\telif c==2:\n\t\t\t\t\tform.op3.data=row['qa_answer']\n\t\t\t\telif c==3:\n\t\t\t\t\tform.op4.data=row['qa_answer']\n\t\t\t\tc=c+1\n\t\treturn render_template('quiz_createquiz.html',form=form,qlist=qlist,correct_answer=correct_answer,combo=combo,qid=param,title=title,uid=unit_id)\n\tif request.method=='POST':\n\t\tquestion_body=request.form['Question']\n\t\tcur=mysql.connection.cursor()\n\t\tresult=cur.execute(\"select count(*) as m from quiz_staging_questions\")\n\t\tdata=cur.fetchone()\n\t\tqn=data['m']+1\n\t\tans1=request.form['op1']\n\t\tans2=request.form['op2']\n\t\tans3=request.form['op3']\n\t\tans4=request.form['op4']\n\t\tansShort=request.form['op_short']\n\t\tchklist=request.form.getlist('chk_op')\n\t\tsubject_id=request.form['subject']\n\t\tcategory_id=request.form['categories']\n\t\tdescription_id=request.form['description']\n\t\tc=0\n\t\taa=\"abcd\"\n\t\tanswer=\"\"\n\t\tif len(ansShort)>0:\n\t\t\tanswer=\"s\"\n\t\telse:\n\t\t\tfor value in chklist:\n\t\t\t\tapp.logger.info(value)\n\t\t\t\tanswer=aa[int(value)]\n\t\tif request.form['btnrq']==\"Update\": 
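# Note: the four per-option execute() calls used to store quiz answers in these
# handlers could be batched; MySQLdb cursors support executemany (a sketch for
# the staging tables, using the same placeholders as the inserts below):
#
# cur.executemany(
#     "insert into quiz_staging_answers(qa_answer,qq_number,qa_answer_number) values (%s,%s,%s)",
#     [(ans1, qn, 'a'), (ans2, qn, 'b'), (ans3, qn, 'c'), (ans4, qn, 'd')],
# )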
#if this was for updating question content\n\t\t\tqid=request.form['post_qid']\n\t\t\tapp.logger.info(\"qid=\"+qid)\n\n\t\t\tcur.execute(\"update quiz_staging_questions set qq_body=%s,qq_correct_answer=%s, subject_id=%s, category_id=%s, description_id=%s where qq_number=%s\",(question_body,answer,subject_id,category_id,description_id,qid))\n\t\t\tif answer==\"s\":\n\t\t\t\tcur.execute(\"update quiz_staging_answers set qa_answer=%s where qq_number=%s and qa_answer_number=\\\"s\\\"\",(ansShort,qid))\n\t\t\telse:\n\t\t\t\t# app.logger.info(\"qid=\"+qid)\n\t\t\t\t# s=\"update quiz_staging_answers set qa_answer='\"+ans1+\"' where qa_answer_number='a' and qq_number=\"+qid\n\t\t\t\t# app.logger.info(s)\n\t\t\t\t# cur.execute(s)\n\t\t\t\tcur.execute(\"update quiz_staging_answers set qa_answer=%s where qa_answer_number=%s and qq_number=%s\",(ans1,'a',qid))\n\t\t\t\tcur.execute(\"update quiz_staging_answers set qa_answer=%s where qa_answer_number=%s and qq_number=%s\",(ans2,'b',qid))\n\t\t\t\tcur.execute(\"update quiz_staging_answers set qa_answer=%s where qa_answer_number=%s and qq_number=%s\",(ans3,'c',qid))\n\t\t\t\tcur.execute(\"update quiz_staging_answers set qa_answer=%s where qa_answer_number=%s and qq_number=%s\",(ans4,'d',qid))\n\t\t\tmysql.connection.commit()\n\t\telif request.form['btnrq']==\"Add a quiz\": #if this was for adding a new question\n\t\t\tcur.execute(\"insert into quiz_staging_questions(qq_body,qq_number,qq_correct_answer,subject_id,category_id,description_id) values (%s,%s,%s,%s,%s,%s)\",(question_body,qn,answer,subject_id,category_id,description_id))\n\t\t\tif answer==\"s\":\n\t\t\t\tcur.execute(\"insert into quiz_staging_answers(qa_answer,qq_number,qa_answer_number) values (%s,%s,'s')\",(ansShort,qn))\n\t\t\telse:\n\t\t\t\tcur.execute(\"insert into quiz_staging_answers(qa_answer,qq_number,qa_answer_number) values (%s,%s,'a')\",(ans1,qn))\n\t\t\t\tcur.execute(\"insert into quiz_staging_answers(qa_answer,qq_number,qa_answer_number) values (%s,%s,'b')\",(ans2,qn))\n\t\t\t\tcur.execute(\"insert into quiz_staging_answers(qa_answer,qq_number,qa_answer_number) values (%s,%s,'c')\",(ans3,qn))\n\t\t\t\tcur.execute(\"insert into quiz_staging_answers(qa_answer,qq_number,qa_answer_number) values (%s,%s,'d')\",(ans4,qn))\n\t\t\tmysql.connection.commit()\n\t\telse: #finished creating quizes\n\t\t\tqf=unit_id+title\n\t\t\tcur.execute(\"insert into quiz_sets (qs_name,qs_type,unit_id,number_of_questions,quizfile) values (%s,%s,%s,%s,%s)\",(title,'Multi-choice',unit_id,(qn-1),qf))\n\t\t\tmysql.connection.commit()\n\t\t\tcur.execute(\"set @nid=Last_Insert_ID()\")\n\t\t\tcur.execute(\"insert into quiz_questions_commit (qq_body,qq_number,qq_correct_answer,qs_id,subject_id,category_id,description_id) select qq_body,qq_number,qq_correct_answer, @nid,subject_id,category_id,description_id from quiz_staging_questions\")\n\t\t\tcur.execute(\"insert into quiz_answers_commit (qa_answer,qq_number,qa_answer_number,qs_id) select qa_answer,qq_number,qa_answer_number, @nid from quiz_staging_answers\")\n\t\t\tmysql.connection.commit()\n\n\t\t\tcur.execute(\"delete from quiz_staging_questions\")\n\t\t\tcur.execute(\"delete from quiz_staging_answers\")\n\t\t\tmysql.connection.commit()\n\t\t\tcur.close()\n\t\t\treturn redirect(url_for('quiz_manager'))\n\t\tcur.close()\n\t\treturn redirect(url_for('quiz_Create_new_quiz'))\n\treturn render_template('quiz_createquiz.html',form=form,qlist=qlist,combo=combo,correct_answer=correct_answer)\n\n#Courses\n#+Register form class\nclass CourseForm(Form):\n\tname = 
StringField('Course Name', [validators.Length(min=1, max=100)])\n\tdesc = TextAreaField('Course Description', [validators.Length(min=1)])\n\tsd=DateField('Starting Date',format='%Y-%m-%d')\n\ted=DateField('Ending Date',format='%Y-%m-%d')\n\n\t# year=SelectField('Course Year',choices=[('2017', '2017'), ('2018', '2018'), ('2019', '2019')],coerce=int)\n\n\tyears = [(y, str(y)) for y in reversed(range(2017, 2027))]\n\t#years.insert(0, ('','year'))\n\tyear = SelectField(choices=years, coerce=int)\n\n\tsemester=SelectField('Semester',choices=[(1, '1'), (2, '2')], coerce=int)\n\tstatus=SelectField('Status',choices=[('Draft','Draft'),('Deleted','Deleted'),('Active','Active')])\n\n\tselected_units=StringField('linked_units')\n\t#unit= SelectMultipleField(u'Units', coerce=int, option_widget=widgets.CheckboxInput(), widget=widgets.ListWidget(html_tag='ul', prefix_label=False))\n\t# unit=SelectMultipleField('Units',coerce=int)\n\n\t# unit = SelectMultipleField(u'Units', coerce=int,\n\t# \t\t\t\t\t\t\toption_widget=widgets.CheckboxInput(),\n\t# \t\t\t\t\t\t\twidget=widgets.TableWidget())\n # \t\t\t\t\t\t#widget=widgets.ListWidget(html_tag='ul', prefix_label=False))\n\n\n#+Course enrolment\n@app.route('/enrol_course/<id>',methods=['POST'])\n@is_logged_in\ndef enrol_course(id):\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#see if withdrawal record exists in people_units.\n\tresult=cur.execute(\"select * from people_units where course_id=%s and people_id=%s\",([id],session['id']))\n\trs=cur.fetchall()\n\tif result>0:\n\t\tfor r in rs:\n\t\t\tif r['status']!=\"Active\":\n\t\t\t\tcur.execute(\"update people_units set status='Active' where course_id=%s and people_id=%s\",([id],session['id']))\n\n\telse: #if this is a new enrolment, do the following\n\t\t#get the list of linked units for the course_id\n\t\tresult=cur.execute(\"select cul_child from course_unit_links where cul_parent=%s\",([id]))\n\t\tunitslist=cur.fetchall()\n\n\t\t#If linked units exist, add those units to people_units too.\n\t\t#if there are no linked units, just add the course enrolment\n\t\tif result>0:\n\t\t\tfor u in unitslist:\n\t\t\t\tcur.execute(\"insert into people_units (unit_id,course_id,people_id,status) values (%s,%s,%s,'Active')\", (u['cul_child'],[id],session['id']))\n\t\telse:\n\t\t\tcur.execute(\"insert into people_units (course_id,people_id,status) values (%s,%s,'Active')\", ([id],session['id']))\n\n\t#commit\n\tmysql.connection.commit()\n\n\n\t#close\n\tcur.close()\n\n\tflash('Course Enrolled','success')\n\treturn redirect(url_for('courses'))\n\n#withdraw from course\n@app.route('/withdraw_course/<id>',methods=['POST'])\n@is_logged_in\ndef withdraw_course(id):\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get the list of linked units for the course_id\n\tresult=cur.execute(\"select pu_id from people_units where course_id=%s and people_id=%s\",([id],session['id']))\n\tunitslist=cur.fetchall()\n\n\t#execute\n\tif result>0:\n\t\tcur.execute(\"update people_units set status='Withdrawal' where course_id=%s and people_id=%s\",([id],session['id']))\n\t\tmysql.connection.commit()\n\tcur.close()\n\tflash('Withdrawn from the course','success')\n\treturn redirect(url_for('courses'))\n\n#+Courses list\n@app.route('/courses')\n@is_logged_in\ndef courses():\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get my courses\n\ts=\"select * from courses where exists(select 1 from people_units where people_units.course_id=courses.id and status='Active' and people_units.people_id={})\".format(session['id'])\n\t# s=\"select 
courses.*,pu.pu_id,pu.people_id from courses inner join people_units pu on pu.course_id=courses.id inner join people p on p.id=pu.people_id where p.username='\"+session['username']+\"'\"\n\tapp.logger.info(s)\n\tresult=cur.execute(s)\n\tcourses=cur.fetchall()\n\n\t#get not yet enrolled courses\n\ts=\"select * from courses where not exists(select 1 from people_units where people_units.course_id=courses.id and people_units.people_id={}) or exists(select 1 from people_units where people_units.course_id=courses.id and people_units.people_id={} and status='Withdrawal')\".format(session['id'],session['id'])\n\tresult=cur.execute(s)\n\topencourses=cur.fetchall()\n\n\t#close connection\n\tcur.close()\n\n\tif result > 0:\n\t\treturn render_template('courses.html', courses=courses,opencourses=opencourses, pid=session['id'])\n\telse:\n\t\tmsg='No courses found'\n\t\treturn render_template('dashboard.html', msg=msg)\n\n#+single course\n@app.route('/course/<id>/')\ndef course(id):\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get course\n\tresult=cur.execute(\"select * from courses where id = %s\", [id])\n\tcourse=cur.fetchone()\n\tresult=cur.execute(\"SELECT u.* FROM course_unit_links cul inner join units u on u.id=cul.cul_child \" +\n\t\t\t\t\t\t\"where cul.cul_parent=%s\",[id])\n\tunitlist=cur.fetchall()\n\n\t#check if the user has enrolled already\n\tresult=cur.execute(\"select pu_id from people_units where course_id=%s and people_id=%s and status='Active'\",([id],session['id']))\n\tenrolled=False\n\tif result>0:\n\t\tenrolled=True\n\n\treturn render_template('course.html', course=course,unitlist=unitlist,cid=id,enrolled=enrolled)\n\n\n#+Edit Course\n@app.route('/edit_course/<id>',methods=['GET','POST'])\n@is_admin\ndef edit_course(id):\n\n\t# Create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get course by id\n\tresult=cur.execute(\"select * from courses where id=%s\", [id])\n\tcourse=cur.fetchone()\n\n\t#Get form\n\tform = CourseForm(request.form)\n\n\t#populate Course form fields\n\tform.name.data=course['course_name']\n\tform.desc.data=course['course_desc']\n\t# sd=form.sd_year.data + '-' + form.sd_month.data +'-'+ form.sd_day.data\n\t# ed=form.ed_year.data + '-' + form.ed_month.data +'-'+ form.ed_day.data\n\tform.sd.data=course['start_date']\n\tform.ed.data=course['end_date']\n\n\tform.year.data=course['year']\n\tform.semester.data=course['semester']\n\n\tform.status.data=course['status']\n\n\t#get Linked units by course id\n\tresult=cur.execute(\"SELECT u.* from course_unit_links cul inner join units u on u.id=cul.cul_child where cul.cul_parent=%s\",[id])\n\tunitlist=cur.fetchall()\n\n\t#Convert dict unit ids into string\n\tWasLinkedIDs=','.join(str(u['id']) for u in unitlist)\n\n\t#get Unlinked units by course id\n\tresult=cur.execute(\"SELECT u.* from units u Where not exists(select cul_child from course_unit_links where cul_parent=%s and u.id=cul_child)\",[id])\n\tunitlist_unlinked=cur.fetchall()\n\tcur.close()\n\n\n\tif request.method == 'POST' and form.validate():\n\n\t\t#get the linked units\n\t\tlinkedUnits=request.form.getlist('chk_linkedunits')\n\t\tLinked_IDs=','.join(str(x) for x in linkedUnits)\n\n\t\t#get the new linked units and merge with Linked_IDs\n\t\tunlinkedUnits=request.form.getlist('chk_unlinkedunits')\n\t\tnewids=','.join(str(x) for x in unlinkedUnits)\n\n\t\tapp.logger.info(\"Originally linked units:\"+WasLinkedIDs)\n\t\tapp.logger.info(\"Units that are still being checked:\"+Linked_IDs)\n\t\tapp.logger.info(\"new checked units:\"+newids)\n\n\t\tif 
\t\tif len(newids)>=1:\n\t\t\tLinked_IDs=Linked_IDs + \",\" + newids\n\n\t\t#Compare old and new linked IDs and keep the difference\n\t\tNow_Unlinked_IDs=set(WasLinkedIDs.split(\",\"))-set(Linked_IDs.split(\",\"))\n\n\n\t\tapp.logger.info(\"All checked units:\"+Linked_IDs)\n\t\tapp.logger.info(\"units that are now unlinked:\"+str(Now_Unlinked_IDs))\n\n\t\tcur=mysql.connection.cursor()\n\t\t# for str(x) in linkedUnits:\n\n\n\t\tfor u_id in Linked_IDs.split(\",\"):\n\t\t\tif len(u_id)>0:\n\t\t\t\tresult=cur.execute(\"select count(1) as m from course_unit_links where cul_parent=%s and cul_child=%s\",([id],u_id))\n\t\t\t\tdbcount=cur.fetchone()\n\t\t\t\tif dbcount['m']<=0:\n\t\t\t\t\tcur.execute(\"insert into course_unit_links (cul_parent,cul_child) values (%s,%s)\",([id],u_id))\n\n\t\tfor u_id in Now_Unlinked_IDs:\n\t\t\tresult=cur.execute(\"select count(1) as m from course_unit_links where cul_parent=%s and cul_child=%s\",([id],u_id))\n\t\t\tdbcount=cur.fetchone()\n\t\t\tif dbcount['m']>0:\n\t\t\t\tcur.execute(\"delete from course_unit_links where cul_parent=%s and cul_child=%s\",([id],u_id))\n\n\t\t# \treturn redirect(url_for('courses'))\n\t\t# return render_template('edit_course.html',form=form, unitlist=unitlist,unitlist_unlinked=unitlist_unlinked)\n\n\t\tname=request.form['name']\n\t\tdesc=request.form['desc']\n\t\tsd=request.form['sd']\n\t\ted=request.form['ed']\n\t\tstatus=request.form['status']\n\t\tyear=request.form['year']\n\t\tsemester=request.form['semester']\n\n\t\t#Create cursor\n\t\tcur=mysql.connection.cursor()\n\n\t\t#Execute\n\t\tcur.execute(\"update courses set course_name=%s, course_desc=%s, status=%s, start_date=%s, end_date=%s, year=%s, semester=%s where id=%s\", (name,desc,status,sd,ed,year,semester,[id]))\n\n\t\t#commit\n\t\tmysql.connection.commit()\n\n\t\t#close\n\t\tcur.close()\n\n\t\tflash('Course Updated','success')\n\t\treturn redirect(url_for('courses'))\n\n\treturn render_template('edit_course.html',form=form, unitlist=unitlist,unitlist_unlinked=unitlist_unlinked)\n\n\n
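# --- illustrative helper (a sketch, not used by the routes in this file) ---\n# the link/unlink bookkeeping above boils down to a set difference over\n# comma-separated ID strings; a small pure function can compute both\n# directions at once:\ndef diff_id_strings(old_csv, new_csv):\n\t#diff_id_strings('1,2,3', '2,3,4') -> ({'4'}, {'1'})  i.e. (to_add, to_remove)\n\told_ids={i for i in old_csv.split(',') if i}\n\tnew_ids={i for i in new_csv.split(',') if i}\n\treturn new_ids-old_ids, old_ids-new_ids\n\n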
newids.split(\",\"):\n\t\t\tcur.execute(\"insert into course_unit_links (cul_parent,cul_child) values (%s,%s)\",(new_course_id,u_id))\n\n\t\tmysql.connection.commit()\n\n\t\t#close\n\t\tcur.close()\n\n\t\tflash('Course Created','success')\n\n\t\treturn redirect(url_for('dashboard'))\n\treturn render_template('create_course.html',form=form,units_list=units_list)\n\n\n#units\n#+Unit form class\nclass UnitForm(Form):\n\tsubject=SelectField('Subject',coerce=int)\n\tname = StringField('Unit Name', [validators.Length(min=1, max=100)])\n\tdesc = TextAreaField('Unit Description', [validators.Length(min=1)])\n\tsd=DateField('Starting Date',format='%Y-%m-%d')\n\ted=DateField('Ending Date',format='%Y-%m-%d')\n\tyears = [(y, str(y)) for y in reversed(range(2017, 2027))]\n\tyear = SelectField(choices=years, coerce=int)\n\tsemester=SelectField('Semester',choices=[(1, '1'), (2, '2')], coerce=int)\n\tstatus=SelectField('Status',choices=[('Draft','Draft'),('Deleted','Deleted'),('Active','Active')])\n\tselected_units=StringField('linked_courses')\n\n#+Edit Unit\n@app.route('/edit_unit/',methods=['GET','POST'])\n@is_admin\ndef edit_unit(id):\n\t#Get form\n\tform = UnitForm(request.form)\n\t# Create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get the refs\n\tresult=cur.execute(\"select * from ref_subjects order by name\")\n\tref_subjects=cur.fetchall()\n\tform.subject.choices = [(c['id'], c['name']) for c in ref_subjects]\n\n\t#get unit by id\n\tresult=cur.execute(\"select * from units where id=%s\", [id])\n\tunit=cur.fetchone()\n\n\n\n\t#populate Unit form fields\n\tform.subject.process_data(unit['subject'])\n\tform.name.data=unit['unit_name']\n\tform.desc.data=unit['unit_desc']\n\tform.sd.data=unit['start_date']\n\tform.ed.data=unit['end_date']\n\tform.year.data=unit['year']\n\tform.semester.data=unit['semester']\n\tform.status.data=unit['status']\n\n\t#get Linked courses by unit id\n\tresult=cur.execute(\"SELECT c.* FROM course_unit_links cul inner join courses c on c.id=cul.cul_parent \" +\n\t\t\t\t\t\t\"where cul.cul_child=%s\",[id])\n\tcourselist=cur.fetchall()\n\n\t#get linked quizes\n\tresult=cur.execute(\"select * from quiz_sets where unit_id=%s\",[id])\n\tquizlist=cur.fetchall()\n\n\t#Convert dict quiz ids into string\n\tWasLinkedIDs=','.join(str(u['qs_id']) for u in quizlist)\n\n\t#get Unlinked quizes by unit id\n\tresult=cur.execute(\"SELECT u.* from quiz_sets u Where u.unit_id is null\")\n\tquizlist_unlinked=cur.fetchall()\n\n\t#close cursor\n\tcur.close()\n\n\tif request.method == 'POST' and form.validate():\n\n\t\t#get the linked quizes\n\t\tlinkedQuizes=request.form.getlist('chk_linkedquizes')\n\t\tLinked_IDs=','.join(str(x) for x in linkedQuizes)\n\n\t\t#get the new linked units and merge with Linked_IDs\n\t\tunlinkedQuizes=request.form.getlist('chk_unlinkedquizes')\n\t\tnewids=','.join(str(x) for x in unlinkedQuizes)\n\n\t\tapp.logger.info(\"Originally linked Qz:\"+WasLinkedIDs)\n\t\tapp.logger.info(\"Qzs that are still being checked:\"+Linked_IDs)\n\t\tapp.logger.info(\"new checked Qz:\"+newids)\n\n\t\tif len(Linked_IDs)>=1:\n\t\t\tif len(newids)>=1:\n\t\t\t\tLinked_IDs=Linked_IDs + \",\" + newids\n\t\t\telse:\n\t\t\t\tLinked_IDs=Linked_IDs\n\t\telse:\n\t\t\tLinked_IDs=newids\n\n\t\t#Compare Old linked IDs and new linked ids and return the difference\n\t\tNow_Unlinked_IDs=set(WasLinkedIDs.split(\",\"))-set(Linked_IDs)\n\n\n\t\tapp.logger.info(\"All checked Qzs:\"+Linked_IDs)\n\t\tapp.logger.info(\"Qzs that are now unlinked:\"+str(Now_Unlinked_IDs))\n\n\t\t#create 
\t\t#create cursor\n\t\tcur=mysql.connection.cursor()\n\n\t\tfor q_id in Linked_IDs.split(\",\"):\n\t\t\tif len(q_id)>0: #if the quiz has no unit_id yet, assign this unit_id to the quiz_set\n\t\t\t\tapp.logger.info(\"add new unit_id into qs:\"+q_id+\", unit_id=\"+id)\n\t\t\t\tresult=cur.execute(\"select count(qs_id) as m from quiz_sets where unit_id is null and qs_id=%s\",(q_id))\n\t\t\t\tdbcount=cur.fetchone()\n\t\t\t\tif dbcount['m']>0:\n\t\t\t\t\tcur.execute(\"update quiz_sets set unit_id=%s where qs_id=%s\",([id],q_id))\n\t\t\t\t\tmysql.connection.commit()\n\n\t\tfor q_id in Now_Unlinked_IDs: #if the quiz was linked and is no longer checked, unassign the unit_id from the quiz set\n\t\t\tresult=cur.execute(\"select count(qs_id) as m from quiz_sets where unit_id=%s and qs_id=%s\",([id],q_id))\n\t\t\tdbcount=cur.fetchone()\n\t\t\tif dbcount['m']>0:\n\t\t\t\tapp.logger.info(\"null candidate:\"+q_id)\n\t\t\t\tcur.execute(\"update quiz_sets set unit_id = NULL where qs_id=%s\",(q_id,))\n\t\t\t\tmysql.connection.commit()\n\n\t\tname=request.form['name']\n\t\tdesc=request.form['desc']\n\t\tsd=request.form['sd']\n\t\ted=request.form['ed']\n\t\tstatus=request.form['status']\n\t\tyear=request.form['year']\n\t\tsemester=request.form['semester']\n\t\tsubject=request.form['subject']\n\n\t\t#Execute (parameterized for every value, including subject and id)\n\t\tapp.logger.info(\"updating unit id=\"+str(id))\n\t\tcur.execute(\"update units set subject=%s,unit_name=%s,unit_desc=%s,status=%s,start_date=%s,end_date=%s,year=%s,semester=%s where id=%s\", (subject,name,desc,status,sd,ed,year,semester,[id]))\n\n\t\t#commit\n\t\tmysql.connection.commit()\n\n\n\n\t\tflash('Unit Updated','success')\n\t\treturn redirect(url_for('units'))\n\n\treturn render_template('edit_unit.html',form=form, courselist=courselist, quizlist=quizlist, quizlist_unlinked=quizlist_unlinked)\n\n\n#+Create Units\n@app.route('/create_unit', methods=['GET','POST'])\n@is_admin\ndef create_Unit():\n\tform = UnitForm(request.form)\n\n\t# Create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get the refs\n\tresult=cur.execute(\"select * from ref_subjects order by name\")\n\tref_subjects=cur.fetchall()\n\tform.subject.choices = [(c['id'], c['name']) for c in ref_subjects]\n\tcur.close()\n\t#update unit multiselect field\n\t# cur=mysql.connection.cursor()\n\t# result=cur.execute(\"select * from courses\")\n\t# course_lisr=cur.fetchall()\n\t# cur.close()\n\n\tif request.method == 'POST' and form.validate():\n\t\tname=form.name.data\n\t\tdesc=form.desc.data\n\t\t# sd=form.sd_year.data + '-' + form.sd_month.data +'-'+ form.sd_day.data\n\t\t# ed=form.ed_year.data + '-' + form.ed_month.data +'-'+ form.ed_day.data\n\t\tsd=form.sd.data\n\t\ted=form.ed.data\n\t\tstatus=form.status.data\n\t\tyear=form.year.data\n\t\tsemester=form.semester.data\n\t\tsubject=form.subject.data\n\t\t# units=form.unit.data\n\n\t\t#Create cursor\n\t\tcur=mysql.connection.cursor()\n\n\t\t#Execute\n\n\t\tcur.execute(\"insert into units (unit_name,unit_desc,status,start_date,end_date,year,semester,subject) values (%s,%s,%s,%s,%s,%s,%s,%s)\", (name,desc,status,sd,ed,year,semester,subject))\n\n\t\t#commit\n\t\tmysql.connection.commit()\n\n\t\t# #Insert course_unit links\n\t\t# cur.execute(\"select max(id) as m from courses \")\n\t\t# cid=cur.fetchone()\n\t\t# new_course_id=cid['m']\n\t\t# for u in units:\n
(%s,%s)\",(new_course_id,u))\n\t\t# mysql.connection.commit()\n\n\t\t#close\n\t\tcur.close()\n\n\t\tflash('Unit Created','success')\n\n\t\treturn redirect(url_for('units'))\n\treturn render_template('create_unit.html',form=form)\n\n\n#+Units list\n@app.route('/units')\ndef units():\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get my courses\n\ts= 'SELECT u.id, u.unit_name, u.unit_desc, u.status, u.start_date, ' \\\n\t + 'u.end_date, u.year, u.semester, r_sbj.name as subject, coalesce(x.m,0) as qcount ' \\\n\t\t+ 'FROM units u left join (select unit_id,count(qs_id) as m from quiz_sets group by unit_id) x on x.unit_id=u.id ' \\\n\t\t+ 'left join ref_subjects r_sbj on r_sbj.id=u.subject ' \\\n\t \t+ 'order by subject, year, semester desc'\n\tresult=cur.execute(s)\n\tunits=cur.fetchall()\n\t#close connection\n\tcur.close()\n\tif result > 0:\n\t\treturn render_template('units.html', units=units)\n\telse:\n\t\tmsg='No courses found'\n\t\treturn render_template('units'.html, msg=msg, units=units)\n\n\n#+single unit\n@app.route('/unit//')\ndef unit(id):\n\n\t#create cursor\n\tcur=mysql.connection.cursor()\n\n\t#get unit\n\tresult=cur.execute(\"select * from units where id = %s\", [id])\n\tunit=cur.fetchone()\n\n\t#get linked courses\n\tresult=cur.execute(\"SELECT c.* FROM course_unit_links cul inner join courses c on c.id=cul.cul_parent \" +\n\t\t\t\t\t\t\"where cul.cul_child=%s\",[id])\n\tcourselist=cur.fetchall()\n\n\t#get quizes\n\tresult=cur.execute(\"select * from quiz_sets where unit_id=%s\",[id])\n\tquizlist=cur.fetchall()\n\tcur.close()\n\n\t#get people_unit ID\n\tcid=request.args.get('cid')\n\tpu_id=get_pu_id(cid,id,session['id'])\n\n\t#set session's question index number\n\tsession['qidx']=0\n\tsession['correct']=0\n\tsession['answers']=[\"x\" for i in range(100)]\n\tsession['useranswers']=[\"\" for i in range(100)]\n\tsession['pu_id']=pu_id\n\treturn render_template('unit.html', unit=unit,courselist=courselist, quizlist=quizlist,pu_id=pu_id)\n\n@app.route('/quiz_solve/',methods=['GET','POST'])\ndef quiz_solve(id):\n\t#get the quiz set ID\n\tcur=mysql.connection.cursor()\n\tpuid=session['pu_id']\n\tqno=request.args.get('q',default=0)\n\tresult=cur.execute(\"select * from quiz_sets where qs_id=%s\",[id])\n\tqset=cur.fetchone()\n\tsession['totalq']=qset['number_of_questions']\n\tsession['qs_id']=id\n\t#get the question by index number (starting from 0)\n\tresult=cur.execute(\"select qq_number,qq_body,qq_correct_answer from quiz_questions_commit where qs_id=%s\",[id])\n\tquizes=cur.fetchall()\n\tx=0\n\n\t#if navigated to other questions, this will be triggered\n\tif int(qno)>0:\n\t\tsession['qidx']=int(qno)-1\n\t\treturn redirect(request.path)\n\tif session['qidx']==session['totalq']:\n\t\tapp.logger.info(session['qidx'])\n\t\treturn render_template('quiz_solve.html',mark=session['answers'].count('o'), qsid=id,qset=qset,question=None,choices=None,finished=True)\n\tquestion=quizes[session['qidx']]\n\n\t#if answer was clicked, move to next question\n\tif request.method=='POST':\n\t\tapp.logger.info(\"POST CALLED\")\n\t\tif request.form['btnrq']==\"Go back to your Dashboard\":\n\t\t\tapp.logger.info(\"finished test\")\n\t\t\treturn redirect(url_for('dashboard'))\n\t\t\t# return redirect(url_for(\"get_jsdata\"))\n\n\t\t#Get user answers and compare with the correct answer\n\t\tcor_answ=question['qq_correct_answer']\n\t\tif cor_answ=='s':\n\t\t\tcur.execute(\"select qa_answer from quiz_answers_commit where qs_id=%s and 
qq_number=%s\",([id],(session['qidx']+1)))\n\t\t\tcor_answ=cur.fetchone()['qa_answer']\n\t\tstd_answ=request.form['answerGroup']\n\t\tsession['useranswers'][session['qidx']]=std_answ\n\t\tapp.logger.info(session['useranswers'])\n\n\t\tif cor_answ==std_answ:\n\t\t\tsession['answers'][int(session['qidx'])]=\"o\"\n\t\telse:\n\t\t\tsession['answers'][int(session['qidx'])]=\"x\"\n\n\t\t#if reached the last question has been solved, end the quizes\n\t\tif session['qidx']+1==session['totalq']:\n\t\t\t#submit test result\n\t\t\tsubmit_test_result()\n\t\t\treturn render_template('quiz_solve.html',mark=session['answers'].count('o'), qsid=None,qset=qset,question=None,choices=None,finished=True)\n\t\tsession['qidx']+=1\n\n\t\tresult=cur.execute(\"select qq_number,qq_body,qq_correct_answer from quiz_questions_commit where qs_id=%s\",[id])\n\t\tquizes=cur.fetchall()\n\t\tquestion=quizes[session['qidx']]\n\t\tcur.execute(\"select b.qa_answer_number, b.qa_answer from quiz_answers_commit b where b.qs_id=%s and b.qq_number=%s\",([id],question['qq_number']))\n\t\tchoices=cur.fetchall()\n\t\tcur.close()\n\t\treturn render_template('quiz_solve.html',qsid=id,qset=qset,question=question,choices=choices,finished=False)\n\n\t# session['CorrectChoice']=quizes[session['qidx']]['qq_correct_answer']\n\tcur.execute(\"select b.qa_answer_number, b.qa_answer from quiz_answers_commit b where b.qs_id=%s and b.qq_number=%s\",(id,question['qq_number']))\n\tchoices=cur.fetchall()\n\n\tcur.close()\n\n\treturn render_template('quiz_solve.html',qsid=id,qset=qset,question=question,choices=choices,finished=False)\n@app.route('/quiz_template')\ndef quiz_examples():\n\treturn render_template('quiz_template.html')\n#\n@app.route('/quiz_template_original//')\ndef quiz_template_original(id):\n\tcur=mysql.connection.cursor()\n\tresult=cur.execute(\"select * from quiz_sets where qs_id=%s\",[id])\n\tquiz=cur.fetchone()\n\tcur.close()\n\treturn render_template('quiz_template_original.html',quiz=quiz)\n\n@app.context_processor\ndef override_url_for():\n return dict(url_for=dated_url_for)\n\ndef dated_url_for(endpoint, **values):\n if endpoint == 'static':\n filename = values.get('filename', None)\n if filename:\n file_path = os.path.join(app.root_path,\n endpoint, filename)\n values['q'] = int(os.stat(file_path).st_mtime)\n return url_for(endpoint, **values)\n\n@app.route('/users')\n@is_admin\ndef users():\n\tcur=mysql.connection.cursor()\n\ts=\"select username,firstname,lastname,email,mob,date_of_birth,phone,people_type from people where exists (select 1 from users where power_level='student' and users.username=people.username)\"\n\tresult=cur.execute(s)\n\tusers=cur.fetchall()\n\tcur.close()\n\treturn render_template('users.html',users=users)\n\n# @app.route('/posttestresult',methods=['POST'])\n# def get_post_js_data():\n# \tjsdata=request.form['javascript_data']\n# \ts=json.loads(jsdata)[0]\n# \tapp.logger.info(s)\n# \treturn s\n\n@app.route('/getdata', methods=['GET','POST'])\ndef get_jsdata():\n\n\tqs_id=session['qs_id']\n\tcorrect=session['answers'].count('o')\n\ttotal=session['totalq']\n\n\t#this variable holds correct/wrong info in a line of string such as oxoxoooxxxx\n\tanswers=\"\".join(session['answers'][0:total])\n\n\t#this variable holds a list of user's answers\n\tuseranswers=session['useranswers'][0:total]\n\tpu_id=session['pu_id']\n\tnum_attempt=0\n\tapp.logger.info(qs_id)\n\tapp.logger.info(correct)\n\tapp.logger.info(total)\n\tapp.logger.info(answers)\n\n\tcur=mysql.connection.cursor()\n\t# s=\"select count(qr_id) as n from 
\t# s=\"select count(qr_id) as n from quiz_record where qs_id={} and people_id={}\".format(qs_id,session['id'])\n\t# app.logger.info(s)\n\tresult=cur.execute(\"select count(qr_id) as n from quiz_record where qs_id=%s and pu_id=%s\",(qs_id,pu_id))\n\trs=cur.fetchone()\n\tnum_attempt=rs['n']+1\n\n\t#insert quiz results into the quiz_record table (parameterized)\n\tcur.execute(\"insert into quiz_record(qs_id,qr_correct,qr_total,qr_attempt,qr_answered,people_id,pu_id) values (%s,%s,%s,%s,%s,%s,%s)\",(qs_id,correct,total,num_attempt,answers,session['id'],pu_id))\n\tmysql.connection.commit()\n\n\t#insert quiz and people_unit links\n\tresult=cur.execute(\"insert into people_unit_quiz_link (pu_id,qs_id,qr_id) values (%s,%s,last_insert_id())\",(pu_id,qs_id))\n\tmysql.connection.commit()\n\n\tcur.close()\n\tflash('Quiz record updated','success')\n\treturn redirect(url_for('dashboard'))\n\ndef submit_test_result():\n\n\tqs_id=session['qs_id']\n\tcorrect=session['answers'].count('o')\n\ttotal=session['totalq']\n\n\t#this variable holds correct/wrong info as a string such as oxoxoooxxxx\n\tanswers=\"\".join(session['answers'][0:total])\n\n\t#this variable holds a list of the user's answers\n\tuseranswers=session['useranswers'][0:total]\n\tpu_id=session['pu_id']\n\tnum_attempt=0\n\tapp.logger.info(qs_id)\n\tapp.logger.info(correct)\n\tapp.logger.info(total)\n\tapp.logger.info(answers)\n\n\tcur=mysql.connection.cursor()\n\t# s=\"select count(qr_id) as n from quiz_record where qs_id={} and people_id={}\".format(qs_id,session['id'])\n\t# app.logger.info(s)\n\tresult=cur.execute(\"select count(qr_id) as n from quiz_record where qs_id=%s and pu_id=%s\",(qs_id,pu_id))\n\trs=cur.fetchone()\n\tnum_attempt=rs['n']+1\n\n\t#insert quiz results into the quiz_record table (parameterized)\n\tcur.execute(\"insert into quiz_record(qs_id,qr_correct,qr_total,qr_attempt,qr_answered,people_id,pu_id,percent) values (%s,%s,%s,%s,%s,%s,%s,%s)\",(qs_id,correct,total,num_attempt,answers,session['id'],pu_id,(correct/total)*100))\n\tmysql.connection.commit()\n\n\t#get the quiz record ID which was just inserted into the table\n\tcur.execute(\"set @qrid=last_insert_id()\")\n\tfor num,answer in enumerate(useranswers,start=1):\n\t\tapp.logger.info(\"useranswers=\"+answer)\n\t\tcur.execute(\"insert into quiz_user_answers (qr_id,question_number,user_answer) values (@qrid,%s,%s)\",(num,answer))\n\n\t#insert quiz and people_unit links\n\tresult=cur.execute(\"insert into people_unit_quiz_link (pu_id,qs_id,qr_id) values (%s,%s,@qrid)\",(pu_id,qs_id))\n\tmysql.connection.commit()\n\n\tcur.close()\n\tflash('Quiz record updated','success')\n\n
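# --- illustrative helper (a sketch, not used by the routes in this file) ---\n# submit_test_result() stores per-question marks as a string such as 'oxoxo'\n# ('o' = correct, 'x' = wrong); this shows how such a string maps to the\n# percent column computed above:\ndef marks_to_percent(answered):\n\t#marks_to_percent('oxoxo') -> 60.0 ; marks_to_percent('') -> 0.0\n\tif not answered:\n\t\treturn 0.0\n\treturn 100.0*answered.count('o')/len(answered)\n\n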
def allowed_file(filename):\n    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/quiz_manager', methods=['GET', 'POST'])\ndef quiz_manager():\n\tcur=mysql.connection.cursor()\n\tresult=cur.execute(\"select u.id, u.unit_name,qs.* from units u left join quiz_sets qs on qs.unit_id=u.id order by u.id\")\n\trs=cur.fetchall()\n\n\tif request.method=='POST':\n\t\tif request.form['btnrq']=='Remove Quiz':\n\t\t\tqs_id=request.form['post_qid']\n\t\t\tapp.logger.info(\"qs_id:\"+qs_id)\n\t\t\tcur.execute(\"delete from quiz_sets where qs_id=%s\",(qs_id,))\n\t\t\tflash(\"Quiz deleted\",'success')\n\t\t\tmysql.connection.commit()\n\t\t\tcur.close()\n\t\t\treturn redirect(request.url)\n\t\t#Create new quiz_set\n\n\t\t# unit_id=request.form['post_id']\n\t\t# title=request.form['qtitle']\n\t\t# return redirect(url_for('quiz_Create_new_quiz',uid=unit_id,title=title))\n\t\tsession['unit_id']=request.form['post_id']\n\t\tsession['title']=request.form['qtitle']\n\t\treturn redirect(url_for('quiz_Create_new_quiz'))\n\treturn render_template('quiz_manager.html',qlist=rs)\n\n@app.route('/MyProgress',methods=['GET','POST'])\n@is_logged_in\ndef MyProgress():\n\tcur=mysql.connection.cursor()\n\t#TODO: the progress view is not implemented yet; close the cursor and\n\t#return to the dashboard so the route does not return None\n\tcur.close()\n\treturn redirect(url_for('dashboard'))\n\n\n# def upload_file():\n#     cur=mysql.connection.cursor()\n#     result=cur.execute(\"select u.id, u.unit_name,qs.* from units u left join quiz_sets qs on qs.unit_id=u.id order by u.id\")\n#     rs=cur.fetchall()\n#     if request.method == 'POST':\n#\n#         if request.form['btnrq']==\"Remove Quiz\":\n#             qs_id=request.form['post_qid']\n#             app.logger.info(\"qs_id:\"+qs_id)\n#             cur.execute(\"delete from quiz_sets where qs_id=\"+qs_id)\n#             flash(\"Quiz delete\",'success')\n#             mysql.connection.commit()\n# \t\t\t  cur.close()\n#             return redirect(request.url)\n#         elif 'file' not in request.files:\n#             flash('No file part','danger')\n#             return redirect(request.url)\n#\n#         # check if the post request has the file part\n#         file = request.files['file']\n#         # if user does not select file, browser also\n#         # submit a empty part without filename\n#         if file.filename == '':\n#             flash('No selected file','danger')\n#             return redirect(request.url)\n#         if file and allowed_file(file.filename):\n#             filename = secure_filename(file.filename)\n#             if \".json\" in filename:\n#                 app.config['UPLOAD_FOLDER']=UPLOAD_FOLDER_QUIZ\n#\n#                 #now insert the database to assign the question\n#                 buttonid=request.form['post_id']\n#                 cur.execute(\"insert into quiz_sets (qs_name,qs_type,unit_id,number_of_questions,quizfile) values (%s,%s,%s,10,%s)\", (filename.replace(\".json\",\"\"),\"Multi-choice\",buttonid,filename))\n#                 mysql.connection.commit()\n#                 file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n#                 flash('Quiz Data Uploaded','success')\n#             else:\n#                 app.config['UPLOAD_FOLDER']=UPLOAD_FOLDER\n#                 file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n#                 flash('Image uploaded','success')\n#                 return redirect(url_for('uploaded_file',filename=filename))\n#     cur.close()\n#     return render_template('quiz_manager.html',qlist=rs)\n\n\n\n\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n    # return send_from_directory(app.config['UPLOAD_FOLDER'],filename)\n\n    return redirect('quiz_manager')\n\ndef get_pu_id(cid,uid,pid):\n\tcur=mysql.connection.cursor()\n\tresult=cur.execute(\"select pu_id from people_units where course_id=%s and unit_id=%s and people_id=%s\",(cid,uid,pid))\n\tdata=cur.fetchone()\n\tcur.close()\n\tif result==0:\n\t\treturn 0\n\treturn data['pu_id']\n\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\napp.secret_key='secret123'\n
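# Illustrative note: a hard-coded secret key should not ship to production;\n# the usual pattern is to read it from the environment instead, e.g.:\n# app.secret_key = os.environ.get('SECRET_KEY', 'dev-only-secret')\n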
if __name__ == '__main__':\n\tapp.run(host='192.168.0.6')\n\t# app.run(debug=True)\n","repo_name":"wjeo001/ga","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":59804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"26520144507","text":"from systeme.FondMarin import *\nfrom ui.PosiJauge import PosiJauge\nfrom ui.blocTexte import BlocTexte\nfrom ui.interrupteur import Interrupteur\nfrom reve.dimensions import getDimsCadre\n\ndef dessineInterrupteur(interrupteur: Interrupteur, x: int, y: int) -> list:\n    \"\"\"Draws an Interrupteur (switch).\n\n    Args:\n        interrupteur (Interrupteur): The switch to draw.\n        x (int): X coordinate of the switch origin.\n        y (int): Y coordinate of the switch origin.\n\n    Returns:\n        list: The dimensions of the switch.\n    \"\"\"\n    interrupteur.dessine(x, y)\n    return interrupteur.getDims()\n\ndef dessinePosiJauge(jauge: PosiJauge, x: int, y: int, longueurMax: int) -> list:\n    \"\"\"Draws a PosiJauge (gauge).\n\n    Args:\n        jauge (PosiJauge): The PosiJauge to draw.\n        x (int): X coordinate of the PosiJauge origin.\n        y (int): Y coordinate of the PosiJauge origin.\n        longueurMax (int): The maximum length of the PosiJauge.\n\n    Returns:\n        list: The dimensions of the PosiJauge.\n    \"\"\"\n    l = int(longueurMax-jauge.points[len(jauge.points)-1][0].getDims()[0]/2-jauge.points[0][0].getDims()[0]/2)\n    jauge.dessine(x, y, l)\n    return jauge.getDims()\n\ndef dessineTexte(texte: BlocTexte, x: int, y: int) -> list:\n    \"\"\"Draws a text block.\n\n    Args:\n        texte (BlocTexte): The text block to draw.\n        x (int): X coordinate of the block origin.\n        y (int): Y coordinate of the block origin.\n\n    Returns:\n        list: The dimensions of the block.\n    \"\"\"\n    texte.dessine([[x, y], 'no'], alignement='g')\n    return texte.getDims()\n\ndef dessineCadre(cadre: list, x: int, y: int, espace: int) -> list:\n    \"\"\"Draws a frame.\n\n    Args:\n        cadre (list): The frame to draw.\n        x (int): X coordinate of the frame origin.\n        y (int): Y coordinate of the frame origin.\n        espace (int): Spacing between the inner elements of the frame.\n\n    Returns:\n        list: The dimensions of the frame.\n    \"\"\"\n    h = getDimsCadre(cadre, espace)[1]\n    draw_rectangle_rounded([x, y, cadre[0][0], h], 0.2, 30, cadre[0][1])\n    ecart = int(xf*0.0125)\n    x += ecart\n    y += int(espace/2)\n    for i in range(len(cadre)-1):\n        element = cadre[i+1]\n        if type(element) == BlocTexte:\n            y += dessineTexte(element, x, int(y-espace*0.1))[1] + int(espace*0.5)\n        elif type(element) == PosiJauge:\n            y += dessinePosiJauge(element, x, int(y+espace/2), cadre[0][0]-ecart)[1] + espace\n        elif type(element) == Interrupteur:\n            y += dessineInterrupteur(element, x, y)[1] + int(espace/2)\n        elif type(element) == list and type(element[0]) == list:\n            y += dessineCadre(element, x, y, espace)[1] + int(espace/2)\n        else:\n            y += espace\n    return [cadre[0][0], h]","repo_name":"engrenage13/Anovel","sub_path":"reve/dessin.py","file_name":"dessin.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"37871455012","text":"from flask import Flask,render_template,request\napp= Flask(__name__)\n\nclass Pessoa():\n    def __init__(self,nome,idade,nascimento,cpf):\n        self.nome=nome\n        self.idade=idade\n        self.nascimento=nascimento\n        self.cpf=cpf\n\nlista=[Pessoa(\"Piske\",17,\"12/09/2001\",\"123.124.213-20\"), Pessoa(\"Ravi\",17,\"16/07/2001\",\"432.234.987.01\")]\n\n
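# Illustrative aside (hypothetical, not used by the routes below): the CPF\n# scans in these views are O(n); a dict keyed by CPF would make lookups O(1):\n# pessoas = {p.cpf: p for p in lista}\n# pessoas.get('123.124.213-20')  # -> the matching Pessoa, or None\n\n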
render_template(\"inicio.html\")\n\n@app.route(\"/addpessoa\")\ndef caramba():\n return render_template(\"adicionarpessoa.html\")\n\n\n\n@app.route(\"/listapessoa_sem_add\")\ndef caramb1():\n\n return render_template(\"listarpessoa.html\", So_cara_foda= lista)\n\n@app.route(\"/listapessoa\")\ndef caramb2():\n\n nome= request.args.get(\"Nome\")\n idade= request.args.get(\"Idade\")\n nasci= request.args.get(\"Nascimento\")\n cpf= request.args.get(\"Cpf\")\n val=0\n # comparar cpfs\n while val==0:\n for pessoa in lista:\n if cpf == pessoa.cpf:\n return render_template(\"erro_add_pessoa.html\")\n val=1\n \n lista.append(Pessoa(nome,int(idade),nasci,cpf))\n return caramb1()\n\n@app.route(\"/deletepessoa\")\ndef caramb3():\n\n cpf= request.args.get(\"Cpf\")\n for pessoa in lista:\n if cpf== pessoa.cpf:\n lista.remove(pessoa)\n break\n return render_template(\"mensagem.html\")\n\n\n@app.route(\"/form_alterar\")\ndef caramb4():\n\n cpf= request.args.get(\"Cpf\")\n for pessoa in lista:\n if cpf== pessoa.cpf:\n return render_template(\"form_alterar.html\", pessoa=pessoa)\n\n@app.route(\"/alterar_pessoa\")\ndef caramb5():\n\n nome= request.args.get(\"Nome\")\n idade= request.args.get(\"Idade\")\n nasci= request.args.get(\"Nascimento\")\n cpf= request.args.get(\"Cpf\")\n pessoa_alterada= Pessoa(nome,idade,nasci,cpf)\n for pessoa in range(len(lista)):\n if cpf== lista[pessoa].cpf:\n lista[pessoa]=pessoa_alterada\n return render_template(\"pessoa_alterada.html\")\n\n\n\napp.run(debug=True)\n","repo_name":"camillevilaca/prog2","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9809794646","text":"# 5176\n# 이진탐색\n\n# Testcast T\n# 1부터 N까지 자연수를 이진 탐색 트리에 저장, N\n\n# 가장 왼쪽 아래의 노드는\n# 완전 이진 트리의 레벨이 i 일 때 2^i 번\n\ndef complete_binary_tree(root):\n global value\n global N\n # 왼쪽 먼저, 루트, 오른쪽 순으로 처리해야 하니까 순서대로\n\n # root > N 일 때 return 해버리면, 왼쪽 노드에서 root가 N보다 커질 때 멈춰 버려서 오른쪽 검사 안할 수도 있음\n # root <= N 인 경우만 재귀함수가 돌게 되면, 일단 root <= N 까지만 tree[root] = value 가 실행됨\n # 그리고 그 뒤로 남은 root 들은 다시 root <= N 에 걸려서 실행되지 않고 \n # 딱 root <= N 까지만 트리가 만들어짐.\n if root <= N:\n complete_binary_tree(root*2)\n tree[root] = value\n value += 1\n complete_binary_tree(root*2 + 1)\n\n\nfor tc in range(1, int(input())+1):\n N = int(input())\n tree = [0] * (N+1)\n \n # 가장 작은 수부터 넣을 건데, 가장 왼쪽 아래부터 넣기\n value = 1 # 작은 수부터 넣을거니까\n complete_binary_tree(1)\n print(\"#{} {} {}\".format(tc, tree[1], tree[N//2]))\n\n ","repo_name":"blessisu/python_algorithm","sub_path":"SWEA/SWEA_5176.py","file_name":"SWEA_5176.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35578878383","text":"from queue import Empty\nfrom tkinter import IntVar\nfrom multiprocessing import Queue\nfrom threading import Thread, Event\nfrom typing import List\n\nfrom vib_music import StreamProcess\nfrom vib_music import StreamEvent, StreamEventType\nfrom vib_music import AudioStreamEvent, AudioStreamEventType\n\nVIB_TUNE_MODE = 'vibration_tune_mode'\n\nclass SliderHelperThread(Thread):\n def __init__(self, variable:IntVar, msg_queue:Queue, end_event:Event):\n super(SliderHelperThread, self).__init__()\n self.variable = variable\n self.msg_queue = msg_queue\n self.end_event = end_event\n \n def run(self) -> None:\n while not self.end_event.is_set():\n try:\n msg = self.msg_queue.get(block=True, timeout=0.1)\n except Empty:\n pass\n else:\n 
if 'pos' in msg.what:\n self.variable.set(msg.what['pos'])\n\nclass VibPlayBackend(object):\n def __init__(self, slider_var:IntVar, processes:List[StreamProcess]=[]):\n super(VibPlayBackend, self).__init__()\n self.audio_proc = processes[0] if len(processes) > 0 else None\n\n if self.audio_proc is None:\n return\n self.vib_processes = processes[1:]\n\n self.slider_var = slider_var \n self.sendQ, self.recvQ = Queue(), Queue()\n self.audio_proc.set_event_queues(self.sendQ, self.recvQ)\n for p in self.vib_processes:\n self.audio_proc.attach_vibration_proc(p)\n\n # prepare for GUI\n self.exit_event = Event()\n self.slider_thread = SliderHelperThread(slider_var, self.recvQ, self.exit_event)\n self.audio_proc.enable_GUI_mode()\n\n self._init_stream()\n try:\n msg = self.recvQ.get(block=True)\n except:\n self.total_frame = 1\n else:\n self.total_frame = msg.what['num_frame']\n \n # NOTE: must start the slider after we get the num frame\n self.slider_thread.start()\n\n self.is_running = True\n \n def has_audio_proc(self) -> bool:\n return self.audio_proc is not None\n \n def _init_stream(self) -> None:\n if not self.has_audio_proc(): return\n for p in self.vib_processes:\n p.start()\n self.audio_proc.start()\n # NOTE: audio process uses auto init here\n # self.sendQ.put(StreamEvent(head=StreamEventType.STREAM_INIT))\n \n def start_stream(self) -> None:\n self.sendQ.put(AudioStreamEvent(head=AudioStreamEventType.AUDIO_START))\n\n def close_stream(self) -> None:\n if not self.has_audio_proc(): return\n if not self.is_running: return\n\n self.sendQ.put(StreamEvent(head=StreamEventType.STREAM_CLOSE))\n\n self.audio_proc.join()\n print('audio process joined')\n for p in self.vib_processes:\n p.join()\n print('vibration process joined')\n self.exit_event.set()\n self.slider_thread.join()\n print('slider thread joined')\n self.is_running = False\n\n def pulse_stream(self) -> None:\n if not self.has_audio_proc(): return\n\n self.sendQ.put(AudioStreamEvent(head=AudioStreamEventType.AUDIO_PULSE))\n\n def resume_stream(self) -> None:\n if not self.has_audio_proc(): return\n\n self.sendQ.put(AudioStreamEvent(head=AudioStreamEventType.AUDIO_RESUME))\n\n def forward_stream(self) -> None:\n if not self.has_audio_proc(): return\n\n pos = self.slider_var.get()\n pos = min(self.total_frame, pos+100)\n self.slider_var.set(pos)\n self.sendQ.put(AudioStreamEvent(head=AudioStreamEventType.STREAM_SEEK, what={'pos': pos}))\n\n def backward_stream(self) -> None:\n if not self.has_audio_proc(): return\n\n pos = self.slider_var.get()\n pos = max(0, pos-100)\n self.slider_var.set(pos)\n self.sendQ.put(AudioStreamEvent(head=AudioStreamEventType.STREAM_SEEK, what={'pos': pos}))\n \n def vib_up(self) -> None:\n pass\n\n def vib_down(self) -> None:\n pass\n\n def seek_stream(self, where:int) -> None:\n if not self.has_audio_proc(): return\n\n self.sendQ.put(AudioStreamEvent(head=AudioStreamEventType.STREAM_SEEK, what={'pos': where}))\n","repo_name":"Ivan-wang/audiovibe","sub_path":"vib_editor/backends/VibPlayBackend.py","file_name":"VibPlayBackend.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70268534984","text":"import re\r\nfrom pathlib import Path\r\n\r\nif __name__ == \"__main__\":\r\n\r\n path = Path(r'C:\\Users\\Adam\\OneDrive\\Dokumenty\\txt2.txt')\r\n replace_words = {\r\n 'i': 'oraz',\r\n 'oraz': 'i',\r\n 'nigdy': 'prawie nigdy',\r\n 'dlaczego': 'czemu'\r\n }\r\n\r\n f = open(path, \"r\")\r\n inp = 
f.read()\r\n\r\n    out = []\r\n    for item in inp.split():\r\n        if item in replace_words.keys():\r\n            out.append(replace_words[item])\r\n        else:\r\n            out.append(item)\r\n\r\n    out = \" \".join(out)\r\n\r\n    f = open(path, \"w\")\r\n    f.write(out)\r\n    f.close()","repo_name":"adamzielina/python","sub_path":"Tekst/Podmienianie słów.py","file_name":"Podmienianie słów.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72929326346","text":"__author__ = 'AntonF'\n\n\"\"\"\nPoop ask for poop resistance\npoop ask for poop voltage\npoop calculate poop effect\npoop print poop result\n\"\"\"\n\n\ndef test():\n    try:\n        (float(vol) ** 2) / float(res)\n    except ZeroDivisionError:\n        print(\"Can't divide with 0.\")\n        exit()\n    except ValueError:\n        print(\"An error has occurred, check for non-numerical inputs.\")\n        exit()\n\n#Get Input Values\nres = input(\"Input resistance: \")\nvol = input(\"Input voltage: \")\n\nprint(\"Resistance = {0}, Voltage = {1}\".format(res, vol))\n\ntest()\n\n#Calc Power\npower = (float(vol) ** 2) / float(res)\n\n#Print Results\nprint(\"Effect = {0} W\".format(power))","repo_name":"Pinker/PinkWork","sub_path":"Effekten.py","file_name":"Effekten.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"35444297602","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\nimport Main as main_function\nimport config.Config as con # language pack, but not enabled yet\n\nclass Ui_Dialog(object):\n    def setupUi(self, Dialog):\n        Dialog.setObjectName(\"Dialog\")\n        Dialog.resize(373, 162)\n\n        self.args=[]\n\n        self.label = QtWidgets.QLabel(Dialog)\n        self.label.setGeometry(QtCore.QRect(20, 10, 161, 31))\n        self.label.setObjectName(\"label\")\n        self.pushButton = QtWidgets.QPushButton(Dialog)\n        self.pushButton.setGeometry(QtCore.QRect(120, 110, 131, 41))\n        self.pushButton.setObjectName(\"pushButton\")\n        self.lineEdit = QtWidgets.QLineEdit(Dialog)\n        self.lineEdit.setGeometry(QtCore.QRect(20, 50, 341, 41))\n        self.lineEdit.setObjectName(\"lineEdit\")\n\n        self.retranslateUi(Dialog)\n        self.pushButton.clicked.connect(self.Button1Listern)\n        QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n    def retranslateUi(self, Dialog):\n        _translate = QtCore.QCoreApplication.translate\n        Dialog.setWindowTitle(_translate(\"Dialog\", \"快捷启动\"))\n        self.label.setText(_translate(\"Dialog\", \"输入您希望附加的参数\"))\n        self.pushButton.setText(_translate(\"Dialog\", \"确定\"))\n\n    def Button1Listern(self):\n        inputer = self.lineEdit.text()\n        args_out = self.args[1]\n        main_function.run_it(args_out,inputer)\n\n\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    MainWindow = QtWidgets.QMainWindow()\n    ui = Ui_Dialog()\n    ui.setupUi(MainWindow)\n    ui.args= sys.argv\n    MainWindow.show()\n    sys.exit(app.exec_())\n","repo_name":"JesusFu/windows_args_added","sub_path":"QuickStart.py","file_name":"QuickStart.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
+{"seq_id":"14852928555","text":"from finetuning import *\nfrom build_vector_index_3 import create_splitter\n\n\ndef print_summaries(df):\n    print(\"\\n\\n\")\n    for s in df.summary.sample(5):\n        print(s)\n\n\nif __name__ == \"__main__\":\n    df = pd.read_parquet(f\"gs://{config.FINE_TUNE_TARGET_BUCKET}/{config.FINE_TUNE_COREF}\")\n    print(df.columns)\n    train, test = get_data_sets_df(df, test_instances=1000)\n    splitter = 
create_splitter()\n train_aug = inject_noise(train, splitter)\n test_aug = inject_noise(test, splitter)\n print_summaries(train_aug)\n print(\"Uploading article snapshots\")\n target_url = f\"gs://{config.FINE_TUNE_TARGET_BUCKET}/{config.FINE_TUNE_FILE_PATTERN}\"\n train_aug.to_parquet(target_url.format(split=\"train\"), index=False)\n test_aug.to_parquet(target_url.format(split=\"test\"), index=False)\n","repo_name":"yw4401/FinBot","sub_path":"pipeline/finetuning_finalize.py","file_name":"finetuning_finalize.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32677153868","text":"from PySide import QtCore\nfrom PySide import QtGui\n\nfrom shiboken import wrapInstance\n\nimport maya.OpenMaya as OpenMaya\nimport maya.OpenMayaUI as OpenMayaUI\n\n# Import helper functions\nimport SwitchUnitsFunctions\nreload(SwitchUnitsFunctions)\n\n\n##############################################################################\n# Module definitions - constants\n##############################################################################\n\ncomboBoxItems = {\n 'Meters': \"Meters\",\n 'Centimeters': \"Centimeters\",\n 'Millimeters': \"Millimeters\"\n}\n\n\n##############################################################################\n# Helper functions\n##############################################################################\n\ndef getMayaMainWindow():\n ptr_mainWindow = OpenMayaUI.MQtUtil.mainWindow();\n \n return wrapInstance(long(ptr_mainWindow), QtGui.QWidget)\n \n \n##############################################################################\n# Class Definitions\n##############################################################################\n \nclass SwitchUnitsGUI(QtGui.QDialog):\n\n def __init__(self, parent=getMayaMainWindow()):\n \n super(SwitchUnitsGUI, self).__init__(parent)\n \n self.createUsDialog()\n self.setCurrentItemFromMaya()\n \n ##########################################################################\n # Class - GUI creation functions\n ##########################################################################\n\n def createUsDialog(self):\n \n self.setWindowTitle(\"Units Switcher\")\n #self.setWindowFlags(QtCore.Qt.Tool)\n self.setWindowFlags(QtCore.Qt.WindowModal)\n self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n \n self.createUsDialogControls()\n self.createUsDialogLayout()\n self.createSignalConnections()\n \n \n def createUsDialogControls(self):\n\n # Dialog label\n self.label = QtGui.QLabel(\"  Working Units\") \n self.label.setMargin(3)\n self.label.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)\n \n # ComboBox label\n self.label_SelectUnits = QtGui.QLabel(\"Linear:\")\n \n # ComboBox\n self.comboBox_SelectUnits = QtGui.QComboBox() \n self.comboBox_SelectUnits.addItem(comboBoxItems['Meters'])\n self.comboBox_SelectUnits.addItem(comboBoxItems['Centimeters'])\n self.comboBox_SelectUnits.addItem(comboBoxItems['Millimeters'])\n \n # Push buttons\n self.pushButton_ApplyAndClose = QtGui.QPushButton(\"Apply and Close\")\n self.pushButton_Apply = QtGui.QPushButton(\"Apply\")\n self.pushButton_Close = QtGui.QPushButton(\"Close\")\n \n \n def createUsDialogLayout(self):\n \n mainLayout = QtGui.QVBoxLayout()\n mainLayout.setContentsMargins(5, 5, 5, 5)\n mainLayout.setSpacing(0)\n \n # BEGIN - Double dialog frame (consists of two frames - Outher0 and Outher)\n self.frame_Outher0 = QtGui.QFrame()\n self.frame_Outher0.setFrameStyle(QtGui.QFrame.StyledPanel | 
QtGui.QFrame.Plain)\n self.frame_Outher0.setLineWidth(1)\n \n layout_frame_Outher0 = QtGui.QVBoxLayout()\n layout_frame_Outher0.setContentsMargins(1, 1, 1, 1)\n layout_frame_Outher0.setSpacing(0)\n \n self.frame_Outher = QtGui.QFrame()\n self.frame_Outher.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain)\n self.frame_Outher.setLineWidth(3)\n \n layout_frame_Outher = QtGui.QVBoxLayout()\n layout_frame_Outher.setContentsMargins(4, 4, 4, 4)\n layout_frame_Outher.setSpacing(0)\n \n # Add the dialog label\n layout_frame_Outher.addWidget(self.label)\n \n # BEGIN - Inner dialog frame\n self.frame_Inner = QtGui.QFrame()\n self.frame_Inner.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Sunken)\n \n layout_frame_Inner = QtGui.QGridLayout()\n\n # Add the combo box label\n layout_frame_Inner.addWidget(self.label_SelectUnits, 0, 0, QtCore.Qt.AlignRight)\n layout_frame_Inner.setColumnMinimumWidth(0, 100)\n \n # Add the combo box\n layout_frame_Inner.addWidget(self.comboBox_SelectUnits, 0, 1) \n layout_frame_Inner.setColumnMinimumWidth(1, 200)\n layout_frame_Inner.setColumnStretch(1, 1)\n \n self.frame_Inner.setLayout(layout_frame_Inner)\n # END - Inner dialog frame\n \n layout_frame_Outher.addWidget(self.frame_Inner)\n layout_frame_Outher.addStretch()\n \n self.frame_Outher.setLayout(layout_frame_Outher)\n \n layout_frame_Outher0.addWidget(self.frame_Outher)\n \n self.frame_Outher0.setLayout(layout_frame_Outher0)\n # END - Double dialog frame\n \n mainLayout.addWidget(self.frame_Outher0)\n \n self.buttonsLayout = QtGui.QHBoxLayout()\n self.buttonsLayout.setContentsMargins(0, 2, 0, 2)\n self.buttonsLayout.setSpacing(2)\n \n # Add the push buttons\n self.buttonsLayout.addWidget(self.pushButton_ApplyAndClose)\n self.buttonsLayout.addWidget(self.pushButton_Apply)\n self.buttonsLayout.addWidget(self.pushButton_Close)\n \n mainLayout.addLayout(self.buttonsLayout) \n \n self.setLayout(mainLayout)\n \n \n def createSignalConnections(self):\n \n self.pushButton_ApplyAndClose.clicked.connect(self.onClicked_pushButton_ApplyAndClose)\n self.pushButton_Apply.clicked.connect(self.onClicked_pushButton_Apply)\n self.pushButton_Close.clicked.connect(self.onClicked_pushButton_Close)\n \n \n ##########################################################################\n # Class - Slots\n ##########################################################################\n \n def onClicked_pushButton_ApplyAndClose(self):\n \n self.switchUnits()\n self.close()\n \n \n def onClicked_pushButton_Apply(self):\n \n self.switchUnits()\n \n \n def onClicked_pushButton_Close(self):\n \n self.close()\n\n\n ##########################################################################\n # Class - Helper functions\n ##########################################################################\n\n def switchUnits(self):\n \n currentIndex = self.comboBox_SelectUnits.currentIndex()\n currentText = self.comboBox_SelectUnits.currentText()\n \n if \"Meters\" == currentText:\n SwitchUnitsFunctions.switchUnitsToMeters()\n elif \"Centimeters\" == currentText:\n SwitchUnitsFunctions.switchUnitsToCentimeters()\n elif \"Millimeters\" == currentText:\n SwitchUnitsFunctions.switchUnitsToMillimeters()\n \n \n def setCurrentItemFromMaya(self):\n \"\"\"Sets the current item of the combo box to the current units)\"\"\"\n \n index = 0\n currentUnits = SwitchUnitsFunctions.getCurrentUnits()\n \n if OpenMaya.MDistance.kMeters == currentUnits:\n index = self.comboBox_SelectUnits.findText(comboBoxItems['Meters']) \n elif OpenMaya.MDistance.kCentimeters == 
currentUnits:\n            index = self.comboBox_SelectUnits.findText(comboBoxItems['Centimeters'])\n        elif OpenMaya.MDistance.kMillimeters == currentUnits:\n            index = self.comboBox_SelectUnits.findText(comboBoxItems['Millimeters'])\n        \n        self.comboBox_SelectUnits.setCurrentIndex(index)","repo_name":"Zingam/MayaPythons","sub_path":"source/UnitsSwitcher/plug-ins/UsCommands/SwitchUnitsGUI.py","file_name":"SwitchUnitsGUI.py","file_ext":"py","file_size_in_byte":7551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"19769223749","text":"import os,sys\nfrom math import *\nfrom cnn import *\nimport torch \nimport torch.utils.data\nfrom torch.autograd import *\nfrom data_reader import *\nfrom parser import *\nfrom tools import *\n\n## Global:\nFig_y = 48\nFig_x = 48\nClassify = 7\nmode = 0\n\n## data processing func:\ndef train_fx(ipt):\n\tglobal Classify,Fig_y,Fig_x\n\t#print (ipt[0,1])\n\tlabel = np.array(ipt[:,0],dtype=np.int)\n\t#print (label)\n\t#print (np.shape(ipt[:,1].flatten()))\n\t\n\tipt = np.array([np.array(ipt[t,1].split(' '),dtype=np.float) for t in range(len(ipt))])\n\n\t#print(np.shape(ipt))\n\n\t#exit(1)\n#.reshape(len(ipt),Fig_y,Fig_x)\n\t#print (ipt)\n\t#exit(1)\n\tipt = [label,ipt] \n\treturn ipt\n\nif len(sys.argv) < 4:\n\tprint (\"Usage: test.py <.prop> <test.csv> <out.csv>\") \n\texit(1)\n\npars = Parser()\npars.parse(sys.argv[1])\n\nID = pars.val['ID']\nmodel_dir = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)),'model'),ID)\n\n## data:\nraw = Data_reader()\nraw.Read_csv(sys.argv[2],'big5',train_fx)\n\n## data processing:\ntrain_x = torch.DoubleTensor(raw.data[1]/255).view(-1,1,Fig_y,Fig_x)\n#train_y = torch.from_numpy(raw.data[0]).int()\n\ndel raw\n\n\n## build model :\nmodel = cnn_fix_v8(Fig_y,Fig_x,Classify)\nmodel.load_state_dict(torch.load(os.path.join(model_dir,'cnn.model')))\n\nprint (\"Total: %d\"%(len(train_x)))\n\n#model.cuda()\nmodel.eval()\nBatch_sz = 128\nres = []\nfor i in np.arange(0,len(train_x),Batch_sz):\n\tprint (i)\n\tif i+Batch_sz > len(train_x):\n\t\tpred = model.forward(Variable(train_x[i:]))\n\telse:\n\t\tpred = model.forward(Variable(train_x[i:i+Batch_sz]))\n\t#pred.cpu()\n\tpred = np.argmax(pred.data.numpy(),axis=1)\n\tres = np.hstack((res,pred))\n\n\n#print (len(res))\n\n\n##save path :\nsav_path = sys.argv[3]\n\n#f = open(os.path.join(model_dir,'Out.csv'),'w')\nf = open(sav_path,'w')\nf.write('id,label\\n')\nfor l in range(len(res)):\n\tf.write('%d,%d\\n'%(l,res[l]))\nf.close()\n\n\n\n\n\n\n","repo_name":"kaihsin/ML2017FALL","sub_path":"hw3/test_v8.py","file_name":"test_v8.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"37690485175","text":"\"\"\"\nYou are given an array prices where prices[i] is the price of a given stock on the i-th day.\n\nYou want to maximize your profit by choosing a single day to buy one stock and choosing a different day in the future to sell that stock.\n\n
Return the maximum profit you can achieve from this transaction. If you cannot achieve any profit, return 0.\n\n \n\nExample 1:\n\nInput: prices = [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.\nNote that buying on day 2 and selling on day 1 is not allowed because you must buy before you sell.\nExample 2:\n\nInput: prices = [7,6,4,3,1]\nOutput: 0\nExplanation: In this case, no transactions are done and the max profit = 0.\n \n\nConstraints:\n\n1 <= prices.length <= 10^5\n0 <= prices[i] <= 10^4\n\n\n\"\"\"\n\n\n\"\"\"\n    Input: prices = [7,1,5,3,6,4]\n    \n    find lowest price first, then find highest price\n    if price trends down return 0 since there is no max profit available\n    \n    \n    [7,3,5,1,6,4]\n      ^\n        ^\n      L R\n    if the \n    \n\n\n\"\"\"\n\n    \ndef buyLowSellHigh(arr):\n    \n    l, r = 0, 1\n    maxP = 0\n    # Remember this is to calculate the MAX profit between two dates\n    # L = buy low and R = sell high\n    # how do you calculate that? there is a built-in Python function you can use to find it\n    while r < len(arr):\n        if arr[l] < arr[r]:\n            profit = arr[r] - arr[l]\n            maxP = max(profit, maxP)\n        else:\n            l = r\n        \n        r += 1\n        \n        \n        \n    return maxP\n\n\n\n\nprices = [7,1,5,3,6,4]\n\nres = buyLowSellHigh(prices)\n\nprint(res)","repo_name":"cwesta/leetCode","sub_path":"bestTimeToSell.py","file_name":"bestTimeToSell.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72929326346","text":"class Solution:\n    def isValid(self, s: str) -> bool:\n        table = {']': '[', '}': '{', ')': '('}\n        stack = []\n\n        for char in s:\n            if char in '({[':\n                stack.append(char)\n            else:\n                if not stack or stack[-1] != table[char]:\n                    return False\n                stack.pop()\n\n        return len(stack) == 0\n    ","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/4회차/B20_Valid Parentheses.py","file_name":"B20_Valid Parentheses.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"26619876334","text":"mais18 = mulheres = homens = 0\r\nwhile True:\r\n    sexo = str(input('Qual é o sexo dessa pessoa? [M/F] ')).upper()[0]\r\n    while sexo not in 'MmFf':\r\n        print('RESPOSTA INVÁLIDA')\r\n        sexo = str(input('Qual é o sexo dessa pessoa? [M/F] ')).upper()[0]\r\n    idade = int(input('Qual é a idade dessa pessoa? '))\r\n    if idade >= 18 :\r\n        mais18 += 1\r\n    if idade < 20 and sexo == 'F':\r\n        mulheres += 1\r\n    if sexo == 'M':\r\n        homens += 1\r\n    cont = str(input('Você quer continuar? [S/N] ')).upper()[0]\r\n    while cont not in 'SsNn':\r\n        print('RESPOSTA INVÁLIDA')\r\n
        cont = str(input('Você quer continuar? [S/N] ')).upper()[0]\r\n    if cont in 'Nn':\r\n        break\r\nprint(f'{ mais18 } pessoas tem mais de 18 anos \\ntem { mulheres } mulheres menores de 20 anos \\ne tem { homens } homens.')","repo_name":"KaueGuimaraes/Python-Begin-Exercises","sub_path":"ex069(nao_revisado).py","file_name":"ex069(nao_revisado).py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72842233225","text":"import configparser\nfrom itertools import groupby\nimport pandas as pd\nimport numpy as np\n\n\n\"\"\"Find the top-N drawdowns\"\"\"\ndef get_top_n_drawdown(drawdown_arr, n=6):\n    dd = drawdown_arr.abs()\n    in_drawdown = zero_series(drawdown_arr)\n    in_drawdown[dd != 0] = 1\n    in_drawdown_list = get_signal_interval(in_drawdown)\n\n    drawdown_record = []\n    for start, end in in_drawdown_list:\n        cur_drawdown = dd.loc[start: end].max()\n        idx_max = dd.loc[start: end].idxmax()\n        drawdown_record.append([start, idx_max, end, cur_drawdown])\n\n    tb = pd.DataFrame(drawdown_record, columns=['start', 'max_date', 'end', 'drawdown']).sort_values(['drawdown'], ascending=False).reset_index(drop=True)\n    return tb.iloc[:n]\n\ndef get_signal_interval(arr, ):\n    \"\"\"\n    arr is a 0/1 series; this function returns a list with the start and end of every run of consecutive 1s\n    :param arr:\n    :return:\n    \"\"\"\n    arr_cumsum = cumsum_with_reset(arr, reset=0).replace(0, np.nan)\n    start_idx = list(arr_cumsum[arr_cumsum == 1].index)\n    end_idx = list(arr_cumsum[arr_cumsum.notna() & arr_cumsum.shift(-1).isna()].index)\n    try:\n        assert len(start_idx) == len(end_idx)\n        return list(zip(start_idx, end_idx))\n    except AssertionError:\n        end_idx.append(arr.index[-1])\n        assert len(start_idx) == len(end_idx)\n        return list(zip(start_idx, end_idx))\n\n\n\ndef cumsum_with_reset(arr: pd.Series, reset=0):\n    \"\"\"\n    The cumulative sum restarts from zero whenever an element == reset is encountered;\n\n    Args:\n        arr:\n        reset:\n\n    Returns: pd.Series\n    \"\"\"\n    if reset is None:\n        v = arr.copy()\n    else:\n        v = arr.replace(reset, np.nan)\n    cumsum = v.cumsum().fillna(method='pad')\n    reset = -cumsum[v.isnull()].diff().fillna(cumsum)\n    result = v.where(v.notnull(), reset).cumsum()\n    return result\n\n\ndef empty_series(arr):\n    try:\n        return pd.Series(index=arr.index, dtype=float)\n    except TypeError:\n        return pd.Series(index=range(len(arr)), dtype=float)\n\n\ndef zero_series(arr):\n    return empty_series(arr).fillna(0)\n\n\n\"\"\"list_to_arr_form\"\"\"\ndef list_to_arr_form(arr, interval_list):\n    res = zero_series(arr)\n    for start, end in interval_list:\n        res.loc[start: end] = 1\n    return res\n\n\n\n\"\"\"Rolling window\"\"\"\ndef rolling_df(df, n, step=1, min_window_size=None):\n    if min_window_size is None:\n        idx_start = n\n        for i in range(idx_start, df.shape[0] + 1, step):\n            try:\n                yield df.iloc[i-n: i]\n            except AttributeError:\n                yield df[i - n: i]\n    else:\n        idx_start = min_window_size\n        for i in range(idx_start, df.shape[0] + 1, step):\n            try:\n                if i >= n:\n                    yield df.iloc[i-n: i]\n                else:\n                    yield df.iloc[: i]\n            except AttributeError:\n                if i >= n:\n                    yield df[i-n: i]\n                else:\n                    yield df[: i]\n\n\n
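# A quick sanity check of rolling_df (illustrative example, not part of the\n# original module): three length-3 windows over a 5-element series, step 1:\n# >>> [w.tolist() for w in rolling_df(pd.Series(range(5)), n=3)]\n# [[0, 1, 2], [1, 2, 3], [2, 3, 4]]\n\n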
\"\"\"Rolling date window, panel data\"\"\"\ndef rolling_dates(df, n=244, date_col=None,interval=1, min_window_size=None):\n    if date_col is None:\n        date_arr = df.index.drop_duplicates()\n        assert date_arr.is_monotonic\n        df_group_indices = df.groupby(df.index).indices\n    else:\n        date_arr = df[date_col].drop_duplicates()\n        assert date_arr.is_monotonic\n        df_group_indices = df.groupby(date_col).indices\n\n    for date_list in rolling_df(date_arr, n=n, step=interval, min_window_size=min_window_size):\n        sub_list = []\n        for date_idx in date_list:\n            idx = df_group_indices[date_idx]\n            sub_list.append(idx)\n        yield df.iloc[np.concatenate(sub_list)]\n\n\ndef export_to_ini(array, dst, section=None):\n    if section is None:\n        section = 'Value'\n    cf = configparser.ConfigParser()\n    cf[section] = {}\n    for idx, r in array.iteritems():\n        dt = idx.strftime('%Y%m%d')\n        cf[section][dt] = str(r)\n    with open(dst, 'w') as file:\n        cf.write(file)\n        file.close()\n    print('Done!')\n\n\ndef export_df_to_ini(df, dst,):\n    df = df.copy().astype(str)\n    df.index = df.index.strftime('%Y%m%d')\n    df_dict = df.to_dict(orient='index')\n    cf = configparser.ConfigParser()\n    cf.optionxform = str\n    cf.read_dict(df_dict)\n    with open(dst, 'w') as configfile:\n        cf.write(configfile)\n        configfile.close()\n    print('Done!')\n\n\n# check elements all identical\ndef all_equal(iterable):\n    g = groupby(iterable)\n    return next(g, True) and not next(g, False)\n\n\ndef quick_backtest(price: pd.Series, signal: pd.Series, shift=True, mode=None):\n    assert price.shape == signal.shape\n    if mode is None:\n        mode = 'diff'\n    else:\n        if mode not in ['diff', 'simple', 'cumprod']:\n            raise ValueError('unknown mode')\n\n    # when there is a signal, go short; when there is no signal, do nothing\n    # signals are tradable on the next day\n    if shift:\n        pos = signal.shift().fillna(0)\n    else:\n        pos = signal\n    pos = 0 * (pos == 0) + 1 * (pos == 1) + (-1) * (pos == -1)\n\n    if mode == 'diff':\n        pct_change = price.diff()\n    else:\n        pct_change = (price - price.shift()) / price.shift().abs()\n\n    if mode == 'cumprod':\n        pnl = (1 + (pct_change * pos)).cumprod()\n    else:\n        pnl = (pct_change * pos).cumsum()\n    return pnl\n\n\ndef cal_metrics(pnl: pd.Series)->dict:\n    pnl = pnl.copy()\n    pnl = pnl.dropna()\n    res = dict()\n    res['收益%'] = (pnl.iloc[-1] - pnl.iloc[0]) * 100  # total return %\n    res['年化收益%'] = pnl.diff().mean() * 244 * 100  # annualized return %\n    res['年化波动%'] = pnl.diff().std() * np.sqrt(244) * 100  # annualized volatility %\n    res['sharpe'] = res['年化收益%'] / res['年化波动%']\n    res['最大回撤%'] = abs((pnl - pnl.expanding().max()).min()) * 100  # max drawdown %\n    res['最大回撤开始'] = pnl.loc[:(pnl - pnl.expanding().max()).idxmin()].idxmax()  # drawdown start\n    res['最大回撤结束'] = (pnl - pnl.expanding().max()).idxmin()  # drawdown end\n    res['收益回撤比'] = res['年化收益%'] / res['最大回撤%']  # return / max-drawdown ratio\n    return res\n
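# Usage sketch for the backtest helpers (illustrative, hypothetical data):\n# price = pd.Series([100.0, 101.0, 99.0, 102.0])\n# signal = pd.Series([1, 1, -1, 0])\n# pnl = quick_backtest(price, signal, shift=True, mode='simple')\n# cal_metrics(pnl)  # summarizes return, volatility, Sharpe and drawdown\n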
Robles-Bykbaev\"]\n__license__ = \"GPL\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"vlarobbyk\"\n__email__ = \"vrobles@ups.edu.ec\"\n__status__ = \"Production\"\n\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\n\nfrom Utilities import Utilities\n\nclass ShapeVisualizer(tk.Frame):\n \n def __init__(self, root):\n tk.Frame.__init__(self,root)\n self.root = root\n self.utilities = Utilities()\n self.init_components()\n self.root.minsize(640,480)\n \n def init_components(self):\n entries = self.utilities.list_corpus('train/')\n sorted(entries, key = lambda str: str.split('/')[-1].split('.')[0])\n l1 = ttk.Label(self.root,text = 'Seleccione una Figura =>', font = ('Verdana',14))\n l1.grid(column = 0, row = 0)\n \n widthc, heightc = 1024, 768\n self.comboBoxShape = ttk.Combobox(self.root, values = entries, font = ('Verdana',14))\n self.comboBoxShape.grid(column = 1, row = 0, columnspan = 3)\n self.comboBoxShape.bind('<>', self.comboShapeSelected)\n \n self.canvas = tk.Canvas(self.root, width = widthc, height = heightc)\n self.canvas.grid(column = 0, row = 1, columnspan = 3 )\n self.canvas.configure(background = 'white')\n self.photo = ImageTk.PhotoImage(Image.open('Logo-GIIATa-small.png'))\n self.img = self.canvas.create_image(widthc/2,heightc/2, anchor = tk.CENTER,image = self.photo)\n \n def comboShapeSelected(self, event):\n print(self.comboBoxShape.get())\n image = Image.open(self.comboBoxShape.get())\n self.photo = ImageTk.PhotoImage(image)\n self.canvas.itemconfig(self.img,image = self.photo)\n\nif __name__==\"__main__\":\n root = tk.Tk()\n root.title('Shape Visualizer - UPS')\n shapev = ShapeVisualizer(root)\n root.mainloop()\n \n \n \n","repo_name":"vlarobbyk/shape-recognition-ANN","sub_path":"ShapeVisualizer.py","file_name":"ShapeVisualizer.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10330352689","text":"\"\"\"\nType annotations for polly service client paginators.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_polly/paginators.html)\n\nUsage::\n\n ```python\n import boto3\n\n from mypy_boto3_polly import PollyClient\n from mypy_boto3_polly.paginator import (\n DescribeVoicesPaginator,\n ListLexiconsPaginator,\n ListSpeechSynthesisTasksPaginator,\n )\n\n client: PollyClient = boto3.client(\"polly\")\n\n describe_voices_paginator: DescribeVoicesPaginator = client.get_paginator(\"describe_voices\")\n list_lexicons_paginator: ListLexiconsPaginator = client.get_paginator(\"list_lexicons\")\n list_speech_synthesis_tasks_paginator: ListSpeechSynthesisTasksPaginator = client.get_paginator(\"list_speech_synthesis_tasks\")\n ```\n\"\"\"\nfrom typing import Iterator\n\nfrom botocore.paginate import Paginator as Boto3Paginator\n\nfrom .literals import EngineType, LanguageCodeType, TaskStatusType\nfrom .type_defs import (\n DescribeVoicesOutputTypeDef,\n ListLexiconsOutputTypeDef,\n ListSpeechSynthesisTasksOutputTypeDef,\n PaginatorConfigTypeDef,\n)\n\n__all__ = (\"DescribeVoicesPaginator\", \"ListLexiconsPaginator\", \"ListSpeechSynthesisTasksPaginator\")\n\nclass DescribeVoicesPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/polly.html#Polly.Paginator.DescribeVoices)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_polly/paginators.html#describevoicespaginator)\n \"\"\"\n\n def paginate(\n 
self,\n *,\n Engine: EngineType = None,\n LanguageCode: LanguageCodeType = None,\n IncludeAdditionalLanguageCodes: bool = None,\n PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[DescribeVoicesOutputTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/polly.html#Polly.Paginator.DescribeVoices.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_polly/paginators.html#describevoicespaginator)\n \"\"\"\n\nclass ListLexiconsPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/polly.html#Polly.Paginator.ListLexicons)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_polly/paginators.html#listlexiconspaginator)\n \"\"\"\n\n def paginate(\n self, *, PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListLexiconsOutputTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/polly.html#Polly.Paginator.ListLexicons.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_polly/paginators.html#listlexiconspaginator)\n \"\"\"\n\nclass ListSpeechSynthesisTasksPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/polly.html#Polly.Paginator.ListSpeechSynthesisTasks)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_polly/paginators.html#listspeechsynthesistaskspaginator)\n \"\"\"\n\n def paginate(\n self, *, Status: TaskStatusType = None, PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListSpeechSynthesisTasksOutputTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/polly.html#Polly.Paginator.ListSpeechSynthesisTasks.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_polly/paginators.html#listspeechsynthesistaskspaginator)\n \"\"\"\n","repo_name":"chrishollinworth/vscode-boto3-intellisense","sub_path":"typings/mypy_boto3_polly/paginator.pyi","file_name":"paginator.pyi","file_ext":"pyi","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37936897245","text":"from pytube.extract import video_id, playlist_id\nfrom pytube.exceptions import RegexMatchError\n\n\ndef get_type(uri):\n try:\n id_video = video_id(uri)\n type_video = 'video'\n except RegexMatchError:\n try:\n id_video = playlist_id(uri)\n type_video = 'playlist'\n except RegexMatchError:\n id_video = None\n type_video = None\n return type_video\n","repo_name":"disasstor/ytdl-flask","sub_path":"utils/get_video_type.py","file_name":"get_video_type.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34869441995","text":"from aiogram import types\nfrom aiogram.dispatcher.filters import BoundFilter\n\nfrom utils.db_api.commands.users_cmds import get_user_by_telegram_id\nfrom utils.db_api.models import Users\n\n\nclass NotBanned(BoundFilter):\n async def check(self, update: [types.Message, types.CallbackQuery]) -> bool:\n\n if isinstance(update, types.Message):\n telegram_id = update.chat.id\n elif isinstance(update, types.CallbackQuery):\n 
telegram_id = update.message.chat.id\n        else:\n            return True  # User is not banned\n\n        # Fetch the user\n        user: Users = await get_user_by_telegram_id(telegram_id)\n\n        if user:\n            if user.restricted:\n                return False  # User is banned\n            else:\n                return True  # User is not banned\n        else:\n            return True  # User is not banned\n","repo_name":"SunAdmirer/WB-bot","sub_path":"filters/not_banned.py","file_name":"not_banned.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"37247525645","text":"# Fibonacci!!\n# DP bottom up\n\n\nclass Solution:\n\n    def climbStairs(self, n: int) -> int:\n        one, two = 1, 1\n\n        for i in range(n - 1):\n            # Why does the range go from 0 to n-2?\n            # Because the gap is n-1, there are n-1 steps to process\n            temp = one\n            one = one + two\n            two = temp\n        return one","repo_name":"YiruDing/LeetcodePractice","sub_path":"Neetcode305/1-D_Dynamic_Programming/70_Climbing_Stairs.py","file_name":"70_Climbing_Stairs.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"72134270984","text":"from bs4 import BeautifulSoup\nimport boto3\nimport json\nimport os\n\nfrom parse_funcs import chart_gen\n\n\nif __name__ == \"__main__\":\n    client = boto3.client('s3')  # low-level functional API\n    resource = boto3.resource('s3')  # high-level object-oriented API\n    html_bucket = resource.Bucket('billboard-charts-html')  # substitute this for your s3 bucket name.\n    json_bucket = resource.Bucket('billboard-charts-json')\n\n    prefix = 'hot-100'\n\n    files = list(html_bucket.objects.filter(Prefix=prefix))\n\n    for file in files:\n\n        json_name = file.key.split('/')[-1].replace('html', 'json')\n\n        file = file.get()['Body'].read().decode(\"utf-8\")\n        soup = BeautifulSoup(file, \"html.parser\")\n\n        try:\n            chart = chart_gen(soup)\n\n            with open(json_name, 'w') as json_file:\n                json.dump(chart, json_file)\n\n            json_bucket.upload_file(json_name, Key=prefix + '/' + json_name)\n            os.remove(json_name)\n\n        except:\n            pass\n","repo_name":"fredabood/billboard","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"3508983439","text":"import csv\nimport os\nimport random\n\n\ndef create_csv_file(dir):\n    # First the paths are defined. Please fill in the line for the Test.csv\n    csv_path_train = os.path.join(dir, \"Training.csv\")\n    csv_path_test = os.path.join(dir, \"Test.csv\")\n\n    # Now the files are created and the column headers are written\n    # Training\n    with open(csv_path_train, 'w', newline='') as file:\n        writer = csv.writer(file)\n        writer.writerow([\"ImageId\", \"PredictionString\"])\n    # Test\n    with open(csv_path_test, 'w', newline='') as file:\n        writer = csv.writer(file)\n        writer.writerow([\"ImageId\", \"PredictionString\"])\n\n    # Show the different classes -> only directories are used\n    list_of_classes = [element for element in os.listdir(dir) if not os.path.isfile(os.path.join(dir, element))]\n    print(f\"Liste aller Klassen: {str(list_of_classes)}\")\n\n    # Iterate over all classes\n    for current_class in list_of_classes:\n        class_dir = os.path.join(dir, current_class)\n        # List of all .jpg files in the folder of the current class\n        #for file in os.listdir(class_dir)\n        list_of_files = [file for file in os.listdir(class_dir) if os.path.isfile(os.path.join(class_dir, file)) and file.endswith(\".jpg\")]\n\n        # Shuffle the files randomly\n        random.seed(0)\n        random.shuffle(list_of_files)\n\n        # Split the files into training and test data\n        train_end = int(len(list_of_files) * 0.75)\n        train_files = list_of_files[:train_end]\n        test_files = list_of_files[train_end:]\n\n        # Here the file names are written to the .csv files\n        # Training\n        with open(csv_path_train, 'a', newline='') as file:\n            writer = csv.writer(file)\n            for fname in train_files:\n                writer.writerow([fname, current_class])\n\n        # Test\n        with open(csv_path_test, 'a', newline='') as file:\n            writer = csv.writer(file)\n            for fname in test_files:\n                writer.writerow([fname, current_class])\n\n\n# Directory containing the dataset\ninput_dir = 'AKIPRO_safety_application_dataset'\n\n# Create the .csv files\ncreate_csv_file(input_dir)","repo_name":"KI-Campus/KI-und-Industrie-4.0","sub_path":"AKIPro/Sicherheitskritische_Anwendungen/Loesung/Loesung_3_1.py","file_name":"Loesung_3_1.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"13714657829","text":"from typing import List, Tuple\n\nfrom transformers.tokenization_utils import PreTrainedTokenizer  # type: ignore\n\n\nclass TextPreprocessor():\n    \"\"\" Class in charge of the text preprocessing process.\n    \"\"\"\n\n    def __init__(self, tokenizer: PreTrainedTokenizer):\n        self.__tokenizer: PreTrainedTokenizer = tokenizer\n        self.__max_model_length = self.__tokenizer.model_max_length or 512\n        self.section_points: List[int] = [0]\n\n    def preprocess(self, sentences: List[str]) -> List[List[str]]:\n        \"\"\" Groups the sentences in sections suitable to the tokenizer limits.\n\n        Args:\n            sentences (List[str]): A list of sentences.\n\n        Returns:\n            List[List[str]]: A list that contains a list per section.\n                Each inner list contains the sentences of that section.\n        \"\"\"\n        sentences_tokens: List[int] = [\n            len(self.__tokenizer.encode(sentence)) for sentence in sentences]\n\n        print(sentences_tokens)\n\n        prev_section_points, _ = self.__sentences_compactator(sentences_tokens)\n\n        print(prev_section_points)\n\n        if len(prev_section_points) < 2:\n            return [sentences]\n\n        self.section_points = self.__balance_sections(\n            sentences_tokens, prev_section_points)\n\n        print(self.section_points)\n\n        return [sentences[self.section_points[i]:self.section_points[i+1]]\n                for i in range(len(self.section_points) - 1)]\n\n
def __sentences_compactator(self, sentences_tokens: List[int]) -> Tuple[List[int], List[int]]:\n # pylint: disable=line-too-long\n \"\"\" Given a list with the number of tokens per sentence the function groups\n the sentences in sections that don't surpass the model limit.\n Original concept: https://github.com/dmlls/jizt-tfg/blob/b566f22a1714408893ca7a590da8e9d1ff18068f/src/services/t5_large_text_encoder/text_encoding.py#L153\n\n Args:\n sentences_tokens (List[str]): A list with the number of tokens per sentence.\n\n Returns:\n Tuple[List[int], List[int]]: A tuple is composed by two lists.\n The first list contains the starting points of each section in the sentences list.\n The second list contains the number of tokens of each section.\n \"\"\"\n section_points: List[int] = [0]\n sections_tokens: List[int] = [0]\n\n # We loop over the tokens per sentence grouping the sentences\n # in sections that don't surpass the model limit.\n for i in range(len(sentences_tokens)):\n n_sentence_tokens = sentences_tokens.pop(0)\n if sections_tokens[-1] + n_sentence_tokens < self.__max_model_length:\n sections_tokens[-1] += n_sentence_tokens\n else:\n sections_tokens.append(n_sentence_tokens)\n section_points.append(i)\n\n return (section_points, sections_tokens)\n\n def __balance_sections(self, sentences_tokens: List[int],\n prev_section_points: List[int]) -> List[int]:\n # pylint: disable=line-too-long\n \"\"\" Given a list with the number of tokens per sentence and a list\n with the previous starting points of the sections, the function tries\n to balance the number of tokens that compose each section.\n Original concept: https://github.com/dmlls/jizt-tfg/blob/b566f22a1714408893ca7a590da8e9d1ff18068f/src/services/t5_large_text_encoder/text_encoding.py#L196\n\n Args:\n sentences_tokens (List[str]): A list with the number of tokens per sentence.\n prev_section_points (List[int]): A list with the starting points of each section\n in the sentences list.\n\n Returns:\n List[int]: A list with the optimized starting points of each section\n in the sentences list.\n \"\"\"\n new_sect_points = prev_section_points[:]\n new_sect_points.append(len(sentences_tokens))\n\n while True:\n temp_sect_points = new_sect_points[:]\n\n # From the n - 1 section to 0.\n for i in range(len(new_sect_points) - 2, 0, -1):\n # The number of tokens of the previous section.\n sect_1_size = sum(\n sentences_tokens[temp_sect_points[i-1]:temp_sect_points[i]])\n\n # The number of tokens of the current section.\n sect_2_size = sum(\n sentences_tokens[temp_sect_points[i]:temp_sect_points[i+1]])\n\n # The number of tokens of the previous section if we move\n # the last sentence of the previous section to the current section.\n new_sect_1_size = sum(\n sentences_tokens[temp_sect_points[i-1]:(temp_sect_points[i] - 1)])\n\n # The number of tokens of the current section if we move\n # the last sentence of the previous section to the current section.\n new_sect_2_size = sum(\n sentences_tokens[(temp_sect_points[i] - 1):temp_sect_points[i+1]])\n\n # We calculate the maximum value of the differences between the previous\n # the current sections with the model limit. 
If this value is bigger than\n # the value we would get if we move the last sentence of the previous\n # section to the correct one, then we move the point who marks the start\n # of the actual section.\n if (max(self.__max_model_length - sect_1_size,\n self.__max_model_length - sect_2_size) >\n max(self.__max_model_length - new_sect_1_size,\n self.__max_model_length - new_sect_2_size)):\n temp_sect_points[i] -= 1\n\n # If the sections points haven't changed since the last iteration, we end the loop.\n if new_sect_points != temp_sect_points:\n new_sect_points = temp_sect_points\n else:\n break\n\n return new_sect_points[:-1]\n","repo_name":"MrpYA45/github-text-mining-tfg","sub_path":"src/backend/gtmprocessing/gtmprocessing/logic/utils/textpreprocessor.py","file_name":"textpreprocessor.py","file_ext":"py","file_size_in_byte":6062,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"5317201524","text":"__author__='SeanPark_ViaSat'\r\n\r\nimport unittest\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nimport datetime\r\nimport time\r\nimport string\r\nimport random\r\nimport openpyxl\r\nimport pandas as pd\r\nimport xlsxwriter\r\nimport xlwt\r\nimport os\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport selenium.webdriver.support.ui as ui\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\n\r\nclass NewOrder(unittest.TestCase):\r\n\r\n bankRoutingNumber = '122105278'\r\n bankAccountNumber = '0000000016'\r\n paymentType = ''\r\n\r\n def setUp(self):\r\n self.driver = webdriver.Chrome(\"C:\\\\Selenium\\\\chromedriver.exe\")\r\n\r\n self.wb = openpyxl.load_workbook('NewConnectOrders.xlsx')\r\n\r\n self.wbAddress = openpyxl.load_workbook(('./Data/Addresses.xlsx'))\r\n self.sheetAddress = self.wbAddress['Sheet1']\r\n self.username = self.sheetAddress.cell(row=4, column=2).value\r\n self.password = self.sheetAddress.cell(row=4, column=3).value\r\n self.salesChannel = self.sheetAddress.cell(row=4, column=4).value\r\n self.customerType = self.sheetAddress.cell(row=4, column=5).value\r\n self.failures = []\r\n\r\n def test_firstTest(self):\r\n\r\n driver = self.driver\r\n\r\n driver.get(\"https://ordermgmt.test.exede.net/PublicGUI-SupportGUI/v1/pages/addcustomer/serviceAvailability.xhtml\")\r\n\r\n print(\"driver.title : \" + driver.title)\r\n\r\n self.assertIn(\"\", driver.title)\r\n\r\n if driver.title != \"Test\":\r\n self.failures.append(driver.title + '!=' + \"Test\")\r\n\r\n driver.implicitly_wait(50)\r\n\r\n driver.set_page_load_timeout(30)\r\n\r\n driver.get(\"https://ordermgmt.test.exede.net/PublicGUI-SupportGUI/v1/pages/addcustomer/serviceAvailability.xhtml\")\r\n\r\n driver.maximize_window()\r\n\r\n driver.implicitly_wait(20)\r\n\r\n driver.find_element_by_xpath(\"//*[@id=\\\"document:body\\\"]/table/tbody/tr[2]/td/form/table/tbody/tr[3]/td[2]/input\").send_keys(self.username)\r\n\r\n driver.find_element_by_xpath(\r\n \"//*[@id=\\\"document:body\\\"]/table/tbody/tr[2]/td/form/table/tbody/tr[4]/td[2]/input\").send_keys(self.password)\r\n\r\n driver.find_element_by_name(\"submit\").click()\r\n\r\n driver.implicitly_wait(5)\r\n\r\n self.addCustomerTab = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH, 
'//*[@id=\"addCustomerForm:add\"]'))\r\n )\r\n\r\n self.addCustomerTab.click()\r\n\r\n for item in range(0, 1):\r\n\r\n driver.implicitly_wait(3)\r\n time.sleep(1)\r\n\r\n if self.salesChannel == 'WB_DIRECT':\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:salesChannelMenu\\\"]/option[2]\").click()\r\n\r\n # salesChannelOption = selectSalesChannel(salesChannel)\r\n # select = Select(driver.find_element_by_id('addCustomerForm:salesChannelMenu'))\r\n #\r\n # select.select_by_visible_text(salesChannel).click()\r\n\r\n now = datetime.datetime.now()\r\n\r\n currentYear = str(now.year)\r\n\r\n months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']\r\n\r\n hexdigits = list(string.hexdigits)\r\n del hexdigits[10:16]\r\n\r\n # print(hexdigits)\r\n\r\n randomMac = \"AA:BB:CC:\"\r\n\r\n for x in range(0, 6):\r\n randomNumber = random.choice(hexdigits)\r\n randomMac = randomMac + randomNumber\r\n if x % 2 != 0 and len(randomMac) < 17:\r\n randomMac = randomMac + \":\"\r\n\r\n print(\"Mac Address : \" + randomMac)\r\n\r\n self.randomMacNoColon = randomMac.replace(':', '')\r\n\r\n print(self.randomMacNoColon)\r\n\r\n currentMonth = months[now.month - 1]\r\n\r\n currentDay = \"\"\r\n\r\n if now.day < 10:\r\n currentDay = '0' + str(now.day)\r\n else:\r\n currentDay = str(now.day)\r\n\r\n transactionReference = \"SPark_\" + currentDay + currentMonth + currentYear + str(item)\r\n\r\n self.newSheet = currentDay + \"-\" + currentMonth + \"-\" + currentYear\r\n\r\n self.wb.create_sheet(self.newSheet)\r\n\r\n self.ws = self.wb[self.newSheet]\r\n\r\n self.ws.cell(row=1, column=2).value = 'Transaction Reference'\r\n self.ws.cell(row=1, column=3).value = 'Service Agreement'\r\n self.ws.cell(row=1, column=4).value = 'MAC'\r\n self.ws.cell(row=2, column=2).value = transactionReference\r\n self.ws.cell(row=2, column=4).value = randomMac\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:transactionReference\\\"]\").send_keys(\r\n transactionReference)\r\n\r\n # driver.implicitly_wait(2)\r\n # driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:namesIdName1\\\"]\").send_keys(\"Spider\")\r\n\r\n time.sleep(1)\r\n firstNameField = WebDriverWait(driver, 30).until(\r\n EC.presence_of_element_located((By.ID,\r\n 'addCustomerForm:namesIdName1'))\r\n )\r\n\r\n firstNameField.send_keys(\"Spider\")\r\n\r\n driver.implicitly_wait(2)\r\n\r\n time.sleep(1)\r\n lastNameField = WebDriverWait(driver, 30).until(\r\n EC.presence_of_element_located((By.ID,\r\n 'addCustomerForm:namesIdName3'))\r\n )\r\n\r\n lastNameField.send_keys(\"Man\")\r\n\r\n time.sleep(1)\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:addressIdMaybeTableAddress1\\\"]\").send_keys(\r\n \"12017 E Lake Cir\")\r\n time.sleep(1)\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:addressIdMaybeTableCity\\\"]\").send_keys(\"Englewood\")\r\n time.sleep(1)\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\r\n \"//*[@id=\\\"addCustomerForm:addressIdMaybeTableStateAddressState\\\"]/option[7]\").click()\r\n time.sleep(1)\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:addressIdMaybeTableZip\\\"]\").send_keys(\"80111\")\r\n time.sleep(1)\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:primaryPhoneIdMaybeTablePhoneNumber\\\"]\").send_keys(\r\n \"7204823823\")\r\n 
time.sleep(1)\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:emailAddressId\\\"]\").send_keys(\r\n \"sean.park@viasat.com\")\r\n time.sleep(1)\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:Birthdate\\\"]\").send_keys(\"12/15/1973\")\r\n time.sleep(1)\r\n\r\n driver.implicitly_wait(2)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:nextButtonId\\\"]\").click()\r\n time.sleep(1)\r\n\r\n # Contacts Page\r\n\r\n # creditCheckPassed = WebDriverWait(driver, 60).until(\r\n # EC.presence_of_element_located((By.XPATH, '// *[@id = \"addCustomerForm:_id93\"]/tbody/tr/td/span'))\r\n # )\r\n\r\n # print(\"creditCheckPassed? : \" + creditCheckPassed.text)\r\n\r\n time.sleep(5)\r\n\r\n assert \" \" in driver.page_source\r\n\r\n driver.implicitly_wait(2)\r\n\r\n time.sleep(1)\r\n\r\n customerReferenceField = WebDriverWait(driver, 30).until(\r\n EC.presence_of_element_located((By.ID,\r\n 'addCustomerForm:customerReference'))\r\n )\r\n\r\n customerReferenceField.send_keys(transactionReference)\r\n\r\n driver.implicitly_wait(2)\r\n\r\n time.sleep(1)\r\n accountReferenceField = WebDriverWait(driver, 30).until(\r\n EC.presence_of_element_located((By.ID,\r\n 'addCustomerForm:accountReference'))\r\n )\r\n\r\n accountReferenceField.send_keys(transactionReference)\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:nextButtonId\\\"]\").click()\r\n\r\n # Packages Page\r\n\r\n packagesTitle = WebDriverWait(driver, 20).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"addCustomerForm:packagesHeaderLabel\"]'))\r\n )\r\n\r\n # Sometimes, radio button is not checked by default. So intentionally click it.\r\n # id is dynamically created. id114 or id112\r\n # driver.find_element_by_xpath('//*[@id=\"addCustomerForm:_id114:_2\"]').click()\r\n # driver.find_element_by_xpath('//*[@id=\"addCustomerForm:_id112:_2\"]').click()\r\n\r\n time.sleep(1)\r\n\r\n packageRadioButtonFirst = WebDriverWait(driver, 20).until(\r\n EC.presence_of_element_located((By.XPATH, '//input[starts-with(@value, \"$\")]'))\r\n )\r\n\r\n # \"is not clickable at point\" error. Another element is covering the element to click. 
I could use execute_script() to click on this.\r\n nextButton = driver.find_element_by_xpath('//*[@id=\"addCustomerForm:nextButtonId\"]')\r\n driver.execute_script(\"arguments[0].click();\", nextButton)\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n\r\n # Options Page\r\n optionsTitle = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"addCustomerForm:optionsLabel\"]'))\r\n )\r\n\r\n print(\"optionsTitle : \" + optionsTitle.text)\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\"//*[@id=\\\"addCustomerForm:_1selectionPackages:_1\\\"]\").click()\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n # driver.find_element_by_xpath('//*[@id=\"addCustomerForm:nextButtonId\"]').click()\r\n\r\n optionsPageNextButton = driver.find_element_by_xpath('//*[@id=\"addCustomerForm:nextButtonId\"]')\r\n driver.execute_script(\"arguments[0].click();\", optionsPageNextButton)\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n\r\n # Payment Page\r\n paymentMethodTitle = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"addCustomerForm:recurringPaymentInfoLabel\"]'))\r\n )\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\r\n \"//*[@id=\\\"addCustomerForm:recurringPaymentIdRecurringPaymentMethodIdTableCreditCardIdcreditCardTypeId\\\"]/option[3]\").click()\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\r\n \"//*[@id=\\\"addCustomerForm:recurringPaymentIdRecurringPaymentMethodIdTableCreditCardIdNumberId\\\"]\").send_keys(\r\n \"4012000077777777\")\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\r\n \"//*[@id=\\\"addCustomerForm:recurringPaymentIdRecurringPaymentMethodIdTableCreditCardIdExpireMonthIdMonthId\\\"]/option[5]\").click()\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\r\n '//*[@id=\"addCustomerForm:recurringPaymentIdRecurringPaymentMethodIdTableCreditCardIdExpireYearIdYearId\"]/option[3]').click()\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\r\n '//*[@id=\"addCustomerForm:recurringPaymentIdRecurringPaymentMethodIdTableCreditCardIdFirstNameId\"]').send_keys(\r\n \"VISA\")\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\r\n '//*[@id=\"addCustomerForm:recurringPaymentIdRecurringPaymentMethodIdTableCreditCardIdLastNameId\"]').send_keys(\r\n \"APPROVAL\")\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\r\n '//*[@id=\"addCustomerForm:recurringPaymentIdRecurringPaymentMethodIdTableCreditCardIdAddressZip\"]').send_keys(\r\n \"80111\")\r\n\r\n driver.implicitly_wait(2)\r\n time.sleep(1)\r\n driver.find_element_by_xpath('//*[@id=\"addCustomerForm:nextButtonId\"]').click()\r\n\r\n # Review Page\r\n\r\n self.scheduleButton = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"addCustomerForm:scheduleInstallationButtonId\"]'))\r\n )\r\n\r\n self.scheduleButton.click()\r\n time.sleep(1)\r\n\r\n # Schedule Page\r\n\r\n self.submitOrderButton = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"addCustomerForm:submitButtonId\"]'))\r\n )\r\n\r\n self.submitOrderButton.click()\r\n time.sleep(0.3)\r\n\r\n # wait for order reference number created\r\n\r\n # Confirmation Page\r\n\r\n printButton = WebDriverWait(driver, 180).until(\r\n 
EC.presence_of_element_located((By.XPATH, '//*[@id=\"addCustomerForm:printButtonId\"]'))\r\n )\r\n\r\n self.serviceAgreementReference = driver.find_element_by_xpath(\r\n '//*[@id=\"addCustomerForm:serviceAgreementReference\"]').text\r\n\r\n print(\"Sales Channel : \" + self.salesChannel)\r\n print(\"External Account Reference : \" + self.serviceAgreementReference)\r\n\r\n self.newOrderButton = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"addCustomerForm:newOrderButtonId\"]'))\r\n )\r\n\r\n self.newOrderButton.click()\r\n\r\n driver.implicitly_wait(20)\r\n time.sleep(1)\r\n\r\n self.transactionInfoTitle = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"addCustomerForm:transactionInfoLabel\"]'))\r\n )\r\n\r\n print(\"transactionInfoTitle : \" + self.transactionInfoTitle.text)\r\n driver.get('https://spyglass01.test.wdc1.wildblue.net:8443/SpyGlass/')\r\n\r\n self.referenceType = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH,\r\n '/html/body/table/tbody/tr[2]/td/div/div/div[1]/div/div/form/div/table/tbody/tr/td[1]/select/option[5]'))\r\n )\r\n\r\n self.referenceType.click()\r\n\r\n self.referenceValue = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH,\r\n '/html/body/table/tbody/tr[2]/td/div/div/div[1]/div/div/form/div/table/tbody/tr/td[2]/input'))\r\n )\r\n\r\n self.referenceValue.send_keys(transactionReference)\r\n\r\n self.externalSystem = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH,\r\n '/html/body/table/tbody/tr[2]/td/div/div/div[1]/div/div/form/div/table/tbody/tr/td[3]/div/select/option[2]'))\r\n )\r\n\r\n self.externalSystem.click()\r\n\r\n driver.implicitly_wait(20)\r\n time.sleep(1)\r\n\r\n driver.find_element_by_xpath(\r\n '/html/body/table/tbody/tr[2]/td/div/div/div[1]/div/div/form/div/table/tbody/tr/td[4]/input[1]').click()\r\n\r\n self.fsmCustomerCode = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"datatable\"]/tbody/tr[1]/td[1]/div[1]'))\r\n )\r\n\r\n self.serviceAgreementNumber = driver.find_element_by_xpath(\r\n '//*[@id=\"data\"]/table[1]/tbody/tr[2]/td/table/tbody/tr[2]/td[12]').text\r\n\r\n print('serviceAgreementNumber : ' + self.serviceAgreementNumber)\r\n\r\n driver.save_screenshot('./Reports/' + self.serviceAgreementNumber + '.png')\r\n\r\n driver.implicitly_wait(20)\r\n time.sleep(1)\r\n\r\n self.ws.cell(row=2, column=3).value = self.serviceAgreementNumber\r\n\r\n #######################################\r\n ###\r\n ### Provisioning Starting from here....\r\n ###\r\n #######################################\r\n\r\n # driver = webdriver.Chrome(\"C:\\\\Selenium\\\\chromedriver.exe\")\r\n # driver = webdriver.Ie(\"C:\\\\Selenium\\\\IEDriverServer.exe\")\r\n\r\n # driver.set_page_load_timeout(30)\r\n\r\n time.sleep(6)\r\n\r\n # driver.get(\"https://ordermgmt.test.exede.net/PublicGUI-SupportGUI/v1/pages/addcustomer/serviceAvailability.xhtml\")\r\n #\r\n # driver.get('https://spyglass01.test.wdc1.wildblue.net:8443/SpyGlass/')\r\n\r\n self.installGUI = \"https://igui-installationgui.test.wdc1.wildblue.net/InternalGUI-InstallationGUI/\"\r\n\r\n self.installGUIwithMac = self.installGUI + \"?n=\" + self.randomMacNoColon\r\n\r\n driver.get(self.installGUIwithMac)\r\n\r\n # serviceAgreementNumber = '402907978'\r\n\r\n self.screenshotDirectory = './Reports/' + self.serviceAgreementNumber + '_' + driver.name\r\n\r\n if not 
os.path.exists(self.screenshotDirectory):\r\n os.makedirs(self.screenshotDirectory)\r\n\r\n driver.maximize_window()\r\n\r\n time.sleep(3)\r\n\r\n print(\"Web Browser in test : \" + driver.name)\r\n\r\n ### if it's IE, it needs to bypass security warning\r\n if driver.name == \"internet explorer\":\r\n print(\"driver is IE\")\r\n self.continueLink = driver.find_element_by_id('overridelink')\r\n self.continueLink.click()\r\n\r\n driver.implicitly_wait(20)\r\n\r\n self.activationKeyField = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:activationKey\"]'))\r\n )\r\n\r\n self.activationKeyField.send_keys(self.serviceAgreementNumber)\r\n\r\n self.installButton = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:j_id36\"]'))\r\n )\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/1_welcomeToServiceActivation.png')\r\n\r\n self.installButton.click()\r\n\r\n time.sleep(2)\r\n\r\n self.installerNumberField = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:installerId\"]'))\r\n )\r\n\r\n self.installerNumberField.send_keys(\"99072761\")\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/2_customerConfirmationNewInstallation.png')\r\n\r\n self.continueInstallButton = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:j_id53\"]'))\r\n )\r\n\r\n self.continueInstallButton.click()\r\n\r\n time.sleep(5)\r\n\r\n self.emailConfirmationButton = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:j_id30\"]'))\r\n )\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/3_emailConfirmationAndUpdate.png')\r\n\r\n self.emailConfirmationButton.click()\r\n\r\n time.sleep(10)\r\n\r\n # qOIcontinueButton = WebDriverWait(driver, 60).until(\r\n # EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:j_id50\"]'))\r\n # )\r\n\r\n self.thankYouTag = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:j_id40\"]'))\r\n )\r\n\r\n print('thankYouTag Text : ' + self.thankYouTag.text)\r\n\r\n self.qOIcontinueButton = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//input[@type=\"submit\"]'))\r\n )\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/4_qualityOfInstall.png')\r\n\r\n self.qOIcontinueButton.click()\r\n\r\n time.sleep(6)\r\n\r\n self.customerButton = WebDriverWait(driver, 60).until(\r\n EC.element_to_be_clickable((By.XPATH, '//input[@value=\"Customer\"]'))\r\n )\r\n\r\n self.customerButton.click()\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/5_newCustomerAccountSetup.png')\r\n\r\n self.lastFourField = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:paymentAuthentication\"]'))\r\n )\r\n\r\n self.lastFourField.send_keys(\"7777\")\r\n\r\n self.ccContinueButton = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//input[@value=\"Continue\"]'))\r\n )\r\n\r\n self.ccContinueButton.click()\r\n\r\n time.sleep(5)\r\n\r\n # driver.switch_to_frame(1)\r\n\r\n self.pdfIFrame = driver.find_element_by_xpath('//*[@id=\"installerForm:j_id20\"]/iframe')\r\n\r\n # print(pdfIFrame.get_attribute('src'))\r\n\r\n driver.switch_to_default_content()\r\n\r\n driver.switch_to_frame(self.pdfIFrame)\r\n\r\n time.sleep(5)\r\n\r\n 
driver.save_screenshot(self.screenshotDirectory + '/6_customerAgreement.png')\r\n\r\n time.sleep(2)\r\n\r\n self.getStartedButton = WebDriverWait(driver, 60).until(\r\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"pnlElectronic\"]/div/div[1]/button[1]/i'))\r\n )\r\n\r\n print(\"getStartedButtonAttribute : \" + self.getStartedButton.get_attribute('class'))\r\n\r\n self.getStartedButton.click()\r\n\r\n time.sleep(3)\r\n\r\n self.signField = driver.find_element_by_xpath('//*[@id=\"location1\"]/div[2]/div[1]/input')\r\n\r\n print(self.signField.get_attribute('type'))\r\n\r\n self.signField.send_keys(\"Spider Man\")\r\n\r\n time.sleep(3)\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/7_customerAgreementAfterSign.png')\r\n\r\n self.finishSubmitButton = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"completePopupContainer\"]/div/div[1]/button'))\r\n )\r\n\r\n self.finishSubmitButton.click()\r\n\r\n time.sleep(2)\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/8_eSignSubmitted.png')\r\n\r\n time.sleep(2)\r\n\r\n driver.switch_to_default_content()\r\n\r\n self.continueButtonAfterSign = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:j_id25\"]'))\r\n )\r\n\r\n print('continueButtonAfterSign attribute : ' + self.continueButtonAfterSign.get_attribute('class'))\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/9_eSignComplete.png')\r\n\r\n self.continueButtonAfterSign.click()\r\n\r\n time.sleep(3)\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/10_activatingModem.png')\r\n\r\n self.activatingModemContinueButton = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:j_id35\"]'))\r\n )\r\n\r\n self.activatingModemContinueButton.click()\r\n\r\n time.sleep(2)\r\n\r\n self.confirmationMessage = WebDriverWait(driver, 60).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"installerForm:j_id19\"]'))\r\n )\r\n\r\n driver.save_screenshot(self.screenshotDirectory + '/11_confirmation.png')\r\n\r\n if \"Success!\" not in driver.page_source:\r\n self.failures.append(\"Success! 
not in \" + driver.page_source)\r\n\r\n # assert(self.failures == [], str(self.failures))\r\n\r\n def teatDown(self):\r\n\r\n self.wb.save('NewConnectOrders.xlsx')\r\n\r\n self.driver.close()\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n\r\n\r\n\r\n\r\n","repo_name":"park1215/SmokeTestRepo","sub_path":"untitled/SeleniumScripts/NewOrder.py","file_name":"NewOrder.py","file_ext":"py","file_size_in_byte":24663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6001478255","text":"from __future__ import absolute_import\n\nimport os\nimport urlparse\n\nfrom celery.result import ResultSet\nfrom django.db import transaction\n\nfrom brain.kaltura import get_entry_metadata\nfrom brain.kaltura import get_entry_thumbnail\nfrom brain.KalturaUpload import update_tags\nfrom brain.models import Media\nfrom brain.models import Status\nfrom brain.ReKImages import tag_objects\nfrom brain.ReKImages import tag_people\nfrom brain.recognition import get_fb_user\nfrom brain.recognition import get_fb_photos\nfrom brain.recognition import clean_training_state\nfrom brain.recognition import filter_fb_photos_for_training\nfrom brain.recognition import process_fb_photo as _process_fb_photo\nfrom brain.recognition import recognise_unknown_photo\nfrom brain.recognition import train_fb_photos\nfrom brain.recognition import upload_fb_photos_for_training as _upload_fb_photos_for_training\nfrom brain.voicebase import post_entry\nfrom brain.voicebase import get_keywords\nfrom djv import settings\nfrom djv.celery import app\n\n\n@app.task(name='brain.tasks.add')\ndef add(x, y):\n return x + y\n\n\n@app.task(name='brain.tasks.initialise_fb_user')\ndef initialise_fb_user(domain_uri, access_token):\n fb_user = get_fb_user(access_token)\n group_name = fb_user.id\n\n photos = get_fb_photos(access_token)\n\n if settings.USE_ASYNC:\n results = ResultSet([process_fb_photo.delay(d, access_token) for d in photos['data']])\n processed_photos = [p for photos in results.join() for p in photos]\n else:\n processed_photos = [process_fb_photo(d, access_token) for d in photos['data']]\n processed_photos = [p for photos in processed_photos for p in photos]\n\n filtered_photos = filter_fb_photos_for_training(processed_photos)\n media_uri = urlparse.urljoin(domain_uri, settings.MEDIA_URL)\n\n if settings.USE_ASYNC:\n results = ResultSet([upload_fb_photos_for_training.delay([p], group_name, media_uri) for p in filtered_photos])\n results.join()\n else:\n upload_fb_photos_for_training(filtered_photos, group_name, media_uri)\n\n train_fb_photos(group_name)\n\n# clean_training_state(processed_photos)\n\n\n@transaction.atomic\ndef save_service_status(entry_id, service, state, message=''):\n status, _ = Status.objects.get_or_create(media=Media.objects.get(id=entry_id),\n service=service)\n status.state = state\n status.message = message\n status.save()\n\n\n@app.task(name='brain.tasks.think')\ndef think(entry_id, services, domain_uri):\n # get Face++ group name if service is turned on\n group_name = None\n if services.get('facepp'):\n group_name = get_fb_user(services['facepp']).id\n\n # async get keywords from VoiceBase\n # only do this for async mode\n if services.get('voicebase') and settings.USE_ASYNC:\n save_service_status(entry_id, 'VOICEBASE', 'PROGRESS')\n generate_voice_keyword_tags.delay(entry_id)\n\n # get image samplings from Kaltura video\n images = generate_image_samplings_from_kaltura(entry_id)\n\n # process object recognition\n # process facial recognition\n 
is_generate_object_tags = services.get('stockpodium', False)\n    is_generate_friend_tags = services.get('facepp', False)\n\n    if is_generate_object_tags:\n        save_service_status(entry_id, 'STOCKPODIUM', 'PROGRESS')\n    if is_generate_friend_tags:\n        save_service_status(entry_id, 'FACEPP', 'PROGRESS')\n\n    results = []\n    for i in images:\n        image_url = urlparse.urljoin(domain_uri, settings.MEDIA_URL + i)\n        if is_generate_object_tags:\n            if settings.USE_ASYNC:\n                results.append(generate_object_tags.delay(entry_id, image_url))\n            else:\n                generate_object_tags(entry_id, image_url)\n        if is_generate_friend_tags:\n            if settings.USE_ASYNC:\n                results.append(generate_friend_tags.delay(entry_id, image_url, group_name))\n            else:\n                generate_friend_tags(entry_id, image_url, group_name)\n\n    # wait for tagging to be complete\n    if results:\n        ResultSet(results).join()\n\n    save_service_status(entry_id, 'STOCKPODIUM', 'SUCCESS')\n    save_service_status(entry_id, 'FACEPP', 'SUCCESS')\n\n    # clean generated image samplings\n    for i in images:\n        f = os.path.join(settings.MEDIA_ROOT, i)\n        if os.path.isfile(f):\n            os.unlink(f)\n\n\n@app.task(name='brain.tasks.generate_object_tags')\ndef generate_object_tags(entry_id, image_url):\n    tags = tag_objects(entry_id, image_url)\n    update_tags(entry_id, tags)\n\n\n@app.task(name='brain.tasks.generate_friend_tags')\ndef generate_friend_tags(entry_id, image_url, group_name):\n    candidate = recognise_unknown_photo(group_name, image_url)\n    if candidate is not None:\n        update_tags(entry_id, [candidate])\n\n@app.task(name='brain.tasks.wait_for_voice_keywords',\n          max_retries=5,\n          default_retry_delay=30)\ndef wait_for_voice_keywords(entry_id):\n    tags = get_keywords(entry_id)\n    if tags is not None:\n        return tags\n\n    raise Exception('No keywords found from VoiceBase: \"%s\"' % entry_id)\n\n@app.task(name='brain.tasks.generate_voice_keyword_tags',\n          max_retries=5,\n          default_retry_delay=30,\n          ignore_result=True)\ndef generate_voice_keyword_tags(entry_id):\n    if not post_entry(entry_id):\n        raise Exception('Unable to post entry to VoiceBase: \"%s\"' % entry_id)\n\n    tags = get_keywords.delay(entry_id).result\n    if not isinstance(tags, Exception):\n        update_tags(entry_id, tags)\n        save_service_status(entry_id, 'VOICEBASE', 'SUCCESS')\n    else:\n        save_service_status(entry_id, 'VOICEBASE', 'FAIL', str(tags))\n\n\n@app.task(name='brain.tasks.generate_thumbnail_at_time_from_kaltura')\ndef generate_thumbnail_at_time_from_kaltura(entry_id, seconds):\n    image_file = os.path.join(settings.MEDIA_ROOT, 'kaltura', '%(entry_id)s.%(seconds)s.jpg' % locals())\n    if not os.path.isdir(os.path.dirname(image_file)):\n        os.makedirs(os.path.dirname(image_file))\n\n    with open(image_file, 'wb') as f:\n        f.write(get_entry_thumbnail(entry_id, seconds))\n\n    return os.path.relpath(image_file, settings.MEDIA_ROOT)\n\n@app.task(name='brain.tasks.generate_image_samplings_from_kaltura')\ndef generate_image_samplings_from_kaltura(entry_id):\n    duration = None  # duration in seconds\n    attempts = 5\n\n    while attempts > 0:\n        data = get_entry_metadata(entry_id)\n        if 'duration' in data:\n            duration = data['duration']\n            break\n\n        time.sleep(60.0 / attempts)\n        attempts -= 1\n\n    if duration is None:\n        raise Exception('Cannot find video on Kaltura: \"%s\"' % entry_id)\n\n    # number of samples should be proportional to the video duration\n    step = max(1, min(5, int(duration / 5.0)))\n    if settings.USE_ASYNC:\n        results = ResultSet([generate_thumbnail_at_time_from_kaltura.delay(entry_id, i) for i in range(0, duration, step)])\n        return results.join()\n    else:\n        return
[generate_thumbnail_at_time_from_kaltura(entry_id, i) for i in range(0, duration, step)]\n\n@app.task(name='brain.tasks.process_fb_photo')\ndef process_fb_photo(fb_photo_obj, access_token, user_id='me'):\n    return _process_fb_photo(fb_photo_obj, access_token, user_id)\n\n\n@app.task(name='brain.tasks.upload_fb_photos_for_training')\ndef upload_fb_photos_for_training(photos, group_name, media_uri):\n    _upload_fb_photos_for_training(photos, group_name, media_uri)\n\n\n","repo_name":"9ae/djv","sub_path":"brain/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":7451,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"32561061445","text":"# AUTHOR = PAUL KEARNEY \r\n# STUDENT ID = G00364787\r\n# DATE = 2018-02-24\r\n#\r\n# STUDENT ID = G00364787\r\n# EXERCISE 04\r\n\r\n# projectEuler problem 2\r\n# references used\r\n# http://www.tutorialspoint.com/python/python_basic_operators.htm\r\n# https://www.tutorialspoint.com/python/python_strings.htm\r\n# https://stackoverflow.com/questions/9120059/odd-even-string-python\r\n#\r\n\r\n# function to calculate the FIBONACCI value for input value n\r\ndef fib(n):\r\n    \"\"\"This function returns the nth Fibonacci number.\"\"\"\r\n    i = 0\r\n    j = 1\r\n    n = n - 1\r\n\r\n    while n >= 0:\r\n        i, j = j, i + j\r\n        n = n - 1\r\n    \r\n    return i\r\n\r\n# setup working storage\r\nnum = 0\r\ntotal = 0\r\nresult = 0\r\ntotal = 0\r\nok = 1\r\nopStr = \"\"\r\n\r\n# main routine\r\nwhile result < 4000000 and ok == 1: \r\n    result = fib(num)\r\n    if (result < 4000000):\r\n        if (result %2 == 0 ):\r\n            total = total+result \r\n    else:\r\n        ok = 0\r\n    num = num + 1\r\n\r\n# program output to screen \r\nopStr = \"The sum of the even numbers 'under' 4 million is \"+ str(total) \r\nprint(opStr)\r\n \r\n\r\n# Sample output from program\r\n#\r\n# The sum of the even numbers 'under' 4 million is 4613732\r\n#\r\n#\r\n#\r\n#\r\n# Text screengrab from ProjectEuler website\r\n#Congratulations, the answer you gave to problem 2 is correct.\r\n#\r\n#You are the 588803rd person to have solved this problem.\r\n#\r\n#This problem had a difficulty rating of 5%. The highest difficulty rating you have solved remains at 5%.
\r\n#\r\n#\r\n#","repo_name":"g00364787/52167assessments","sub_path":"gmit--exercise04--problem02--fibonacci-even-values-under-4million--code-and-output--20180224.py","file_name":"gmit--exercise04--problem02--fibonacci-even-values-under-4million--code-and-output--20180224.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27292849781","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 18 12:11:28 2019\n\n@author: ziqi\n\"\"\"\nfrom utils import check_mkdir\nimport os\n\npath = 'jobs/conservative_'\n\nconservative = ['False']\nfor item in conservative:\n path_item = path + item + '/'\n check_mkdir(path_item)\n for exp in range (0, 10):\n with open(path_item + 'exp_%d.sh' %(exp), 'w') as f:\n f.write(\"\"\"#!/bin/sh\n#SBATCH --partition=general --qos=short\n#SBATCH --time=4:00:00\n#SBATCH --gres=gpu\n#SBATCH --mem=5000\n#SBATCH --chdir=/tudelft.net/staff-bulk/ewi/insy/VisionLab/ziqiwang/attack/cifar\n#SBATCH --job-name=attack\"\"\" + '\\n'\n\"\"\"#SBATCH --mail-type=END\n\nmodule use /opt/insy/modulefiles\nmodule load cuda/10.1 cudnn/10.1-7.6.0.64\n\necho \"Starting at $(date)\"\nsrun python cifar.py --conservative=\"\"\" + item + ' --exp=' + str(exp) +'\\n' +\n\"\"\"echo \"Finished at $(date)\"\n\"\"\"\n)\n \n \nfor item in conservative:\n path_item = path + item + '/' \n job_files = os.listdir(path_item)\n with open(path_item + 'jobfile_all.sh', 'w') as f:\n for job in job_files:\n f.write('sbatch %s\\n' % job)\n","repo_name":"ziqiwangsilvia/attack","sub_path":"cifar/auto_bash_train_original_and_center.py","file_name":"auto_bash_train_original_and_center.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6303054070","text":"import requests\nfrom bs4 import BeautifulSoup as bs # create alias for BeautifulSoup\n\ngithub_user = input('Input GitHub User: ')\n\nwhile True:\n url = 'https://github.com/' + github_user\n r = requests.get(url)\n if r.status_code == 200:\n soup = bs(r.content, 'html.parser') # get content from URL html code\n profile_image = soup.find('img', {'class': 'avatar avatar-user width-full border color-bg-default'})[\n 'src'] # ['src'] get the src attribute\n print('Success: '+profile_image)\n break\n else:\n print('User Not found or bad spelling -- Try again ? 
')\n        github_user = input('Input GitHub User: ')","repo_name":"iril-dev/InspectorGadget","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17482589248","text":"# /*\n# \tConnecting to SQL Server with Python\n# \tCRUD example avoiding SQL injection\n\t\n# \t@author parzibyte\n# \tMore tutorials at:\n# \t\t\t\t\t\t\n# */\n# CREATE TABLE IF NOT EXISTS peliculas(\n# \tid bigint identity(1,1) primary key,\t\n# \ttitulo VARCHAR(255) NOT NULL,\n# \tanio SMALLINT NOT NULL\n# );\n\n\n# --------------------- This is the db.py file --------------------------\n# \"\"\"\n# Connecting to SQL Server with Python\n# CRUD example avoiding SQL injection\n \n# @author parzibyte\n# More tutorials at:\n \n# \"\"\"\n# import pyodbc\n# direccion_servidor = '127.0.0.1'\n# nombre_bd = 'pruebas_parzibyte'\n# nombre_usuario = 'usuario'\n# password = 'hunter2'\n# try:\n#     conexion = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +\n#                               direccion_servidor+';DATABASE='+nombre_bd+';UID='+nombre_usuario+';PWD=' + password)\n#     # OK! connection successful\n# except Exception as e:\n#     # Catch the error\n#     print(\"Ocurrió un error al conectar a SQL Server: \", e)\n\n# ------------------ Insert Data --------------------------------\n\n\"\"\"\n    Connecting to SQL Server with Python\n    CRUD example avoiding SQL injection\n \n    @author parzibyte\n    More tutorials at:\n\nhttps://parzibyte.me/blog/2019/06/14/conexion-sql-server-python-pyodbc-crud/#Instalacion_de_PyODBC\n\n\"\"\"\nfrom bd import conexion\ntry:\n    with conexion.cursor() as cursor:\n        consulta = \"INSERT INTO peliculas(titulo, anio) VALUES (?, ?);\"\n        # We can call .execute many times with different data\n        cursor.execute(consulta, (\"Volver al futuro 1\", 1985))\n        cursor.execute(consulta, (\"Pulp Fiction\", 1994))\n        cursor.execute(consulta, (\"It\", 2017))\n        cursor.execute(consulta, (\"Ready Player One\", 2018))\n        cursor.execute(consulta, (\"Spider-Man: un nuevo universo\", 2018))\n        cursor.execute(consulta, (\"Avengers: Endgame\", 2019))\n        cursor.execute(consulta, (\"John Wick 3: Parabellum\", 2019))\n        cursor.execute(consulta, (\"Toy Story 4\", 2019))\n        cursor.execute(consulta, (\"It 2\", 2019))\n        cursor.execute(consulta, (\"Spider-Man: lejos de casa\", 2019))\n\nexcept Exception as e:\n    print(\"Ocurrió un error al insertar: \", e)\nfinally:\n    conexion.close()\n\n\n\n\n# import _mssql\n\n# server = 'SERVER_NAME'\n# user = 'USER_NAME'\n# password = ''\n# database = 'MY_DATABASE'\n# conn = _mssql.connect(server, user, password, database)\n\n# # here we create an example table\n\n# conn.execute_non_query('CREATE TABLE pets(id INT, name VARCHAR(100))')\n# conn.execute_non_query(\"INSERT INTO pets VALUES(1, 'Firulais')\")\n# conn.execute_non_query(\"INSERT INTO pets VALUES(2, 'Pelusa')\")\n\n","repo_name":"RichardJoshua/Python_sql","sub_path":"aplicacion.py","file_name":"aplicacion.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"16291155880","text":"\"\"\"\nGiven an array of integers, return a new array such that each element at index i of the new array is the product of all the numbers in the original array except the one at i.\n\"\"\"\n\ndef getNewArray(arr):\n    productFromRight = [1] * len(arr)\n    for i in range(len(arr)-2, -1,-1):\n        productFromRight[i] = productFromRight[i+1] * arr[i+1]\n\n    result = []\n
productFromLeft = 1\n    for i in range(len(arr)):\n        result.append(productFromRight[i] * productFromLeft)\n        productFromLeft *= arr[i]\n\n    return result\n\n\ndef main():\n    arr = [int(x) for x in input().split()]\n    print(getNewArray(arr))\n\nif __name__ == \"__main__\":\n    main()","repo_name":"TheNamesRai/CodingProblems","sub_path":"Problems/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"19555995610","text":"#!/usr/bin/python\n#coding=utf-8\n\n# importing the basic library\nfrom __future__ import print_function\nimport sys, os\n\nsys.path.append(os.path.abspath(''))  # add the current directory to the module search path\n\nfrom utils import *\nfrom data_utils import *\nfrom models.solver_cnn_ import *\nfrom models.ConvNet import *\n\nimport numpy as np\nimport pandas as pd\nfrom itertools import product, permutations\n\nimport matplotlib.pyplot as plt\nprint()\ntest_ctx()\nprint()\n\n\n### Load Data ####\n# GW_address = '/floyd/input/waveform/'\nGW_address = './data/'\n\ndata = pd.DataFrame(np.load(GW_address+'GW_H1.npy'), index=np.load(GW_address+'GW_H1_index.npy'))\nprint('Raw data: ', data.shape)\npeak_samppoint = data.values.argmax(axis=1)\npeak_samppoint = int(peak_samppoint.sum() / peak_samppoint.shape[0])\npeak_time = peak_samppoint/data.shape[-1]\npeak_time = float('{:.2f}'.format(peak_time))\nprint('Peak sampling point at %dth (%.2fs).' %(peak_samppoint, peak_time))\nprint()\n\n### Split the Data\nprint('总波形数目:', data.index.shape)\ntrain_masses = [(float(masses.split('|')[0]), float(masses.split('|')[1])) for masses in data.index if float(masses.split('|')[0]) % 2 != 0]\ntest_masses = [(float(masses.split('|')[0]), float(masses.split('|')[1])) for masses in data.index if float(masses.split('|')[0]) % 2 == 0]\nprint('训练集波形数目:', len(train_masses))\nprint('测试集波形数目:', len(test_masses))\nprint()\n\n# split the data into training and test sets\ntest_masses = [masses for masses in data.index if float(masses.split('|')[0]) % 2 == 0]\ntrain_masses = [masses for masses in data.index if float(masses.split('|')[0]) % 2 != 0]\ntrain_data = nd.array(data.loc[train_masses], ctx=mx.cpu())\ntest_data = nd.array(data.loc[test_masses], ctx=mx.cpu())\n\n## Training\nparams_tl = None\n# for snr in list([1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]):\n#     params_tl = nd.load('/floyd/input/pretrained/OURs/snr_8_best_params_epoch@16.pkl')\n# for snr in list([0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]):\n#     params_tl = nd.load('/floyd/input/pretrained/OURs/snr_2_best_params_epoch@20.pkl')\nsave_address = 'OURs_modified'\nSNR_list = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2,0.1]\ni = 0\nwhile True:\n    try:\n        snr = SNR_list[i]\n    except IndexError:\n        break\n\n    model = ConvNet(conv_params = {'kernel': ((1,16), (1,8), (1,8)), \n                                   'num_filter': (16, 32, 64,),\n                                   'stride': ((1,1), (1,1), (1,1),),\n                                   'padding': ((0,0), (0,0), (0,0),),\n                                   'dilate': ((1,1), (1,1), (1,1),)},\n                    act_params = {'act_type': ('elu', 'elu', 'elu', 'elu',)},\n                    pool_params = {'pool_type': ('max', 'max', 'max',),\n                                   'kernel': ((1,16), (1,16), (1,16),),\n                                   'stride': ((1,2), (1,2), (1,2),),\n                                   'padding': ((0,0),(0,0), (0,0),),\n                                   'dilate': ((1,1), (1,1), (1,1),)},\n                    fc_params = {'hidden_dim': (256, 128, 64)}, drop_prob = 0.5, \n#                     input_dim = (2,1,8192)\n                    input_dim = (1,1,8192)\n                   )\n    \n\n    Solver = Solver_nd(model = model, \n                       train = train_data,#[:100,:],\n                       test = test_data,#[:100,:],\n                       SNR = snr, params = params_tl,\n                       num_epoch=30, rand_times = 2,\n                       batch_size = 256, stacking_size = 512,\n
lr_rate=0.0001#, localnoise = localnoise\n ,save_checkpoints_address = './pretrained_models/%s/' %save_address\n ,checkpoint_name = 'snr_%s' %int(snr*100), floydhub_verbose =False, )\n\n try:\n Solver.Training()\n except mx.MXNetError:\n print('Rerunning...')\n continue\n\n params_tl = Solver.best_params\n i += 1\n\n###########\n\n auc_list = []\n snr_list = np.linspace(0.1, 1, 10)\n j = 0\n while True:\n try:\n snr = snr_list[j]\n print('Testing for snr=', snr)\n except IndexError:\n break\n\n try:\n Solver = Solver_nd(model = OURs_modified, \n train = train_data,\n test = test_data,\n SNR = snr, \n batch_size = 256)\n except mx.MXNetError:\n print('Rerunning...')\n continue\n auc_var_list = []\n i = 0\n while True:\n if i == 2: break\n else: pass\n try:\n prob, label , _= Solver.predict_nd()\n except mx.MXNetError:\n print('Rerunning...')\n continue\n fpr, tpr, thresholds = metrics.roc_curve(label, prob, pos_label=1)\n auc = metrics.auc(fpr, tpr)\n auc_var_list.append(auc)\n print('{\"metric\": \"AUC for SNR(model,test)=(%s,(0.1~10))\", \"value\": %.5f}' %(param_add.split('_')[1], auc) )\n\n i += 1\n j += 1\n \n auc_list.append(auc_var_list)\n auc_frame.append(auc_list) ","repo_name":"iphysresearch/Python4GW","sub_path":"run_modified.py","file_name":"run_modified.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"17220174365","text":"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\ndef get_firestore_client():\n\n print(\"init_config_firestore...\")\n try:\n\n cred = credentials.Certificate('static/data-base-teste-277100-8a0d28ccf452.json')\n firebase_admin.initialize_app(cred)\n db = firestore.client()\n return db\n except ValueError as error:\n print(\"erro ao init_config_firestore.\" + str(error))","repo_name":"robertosrjr/flask-restApi","sub_path":"apps/users/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30408808673","text":"from __future__ import division\nfrom itertools import *\n\nfrom Util import *\n\nlcm_pair = Util.lcm_pair\n\nclass ZipIter:\n\tdef __init__ (self, rangeIter, chakraIter, solfeggio):\n\t\t#print rangeIter\n\t\t#print chakraIter\n\t\t(a, b, c) = lcm_pair (len (rangeIter), len (chakraIter))\n\t\t#(a, b, c) = lcm_pair (rangeIter.length, chakraIter.length)\n\t\tself.rangeIter = cycle (\n\t\t\t#repeat (\n\t\t\t\trangeIter\n\t\t\t#, a)\n\t\t)\n\t\tself.chakraIter = cycle (\n\t\t\t#repeat (\n\t\t\t\tchakraIter\n\t\t\t#, b)\n\t\t)\n\t\tself.solfeggio = solfeggio\n\t\tself.length = c\n\tdef __len__ (self):\n\t\treturn self.length\n\tdef __iter__ (self):\n\t\treturn self\n\tdef next (self):\n\t\tr = self.rangeIter.next ()\n\t\tc = self.chakraIter.next ()\n\t\treturn self.solfeggio[r, c]","repo_name":"InnovAnon-Inc/HafrenHaver","sub_path":"old/music0/ZipIter.py","file_name":"ZipIter.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17309229428","text":"CONFIGS_ = {\n # input_channel, n_class, hidden_dim, latent_dim\n 'mnist': ([6, 16, 'F'], 1, 10, 784, 32),\n 'mnist_cnn1': ([6, 'M', 16, 'M', 'F'], 1, 10, 64, 32),\n 'mnist_cnn2': ([16, 'M', 32, 'M', 'F'], 1, 10, 128, 32)\n}\n\n# temporary roundabout to evaluate sensitivity of the generator\nGENERATORCONFIGS = {\n # 
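The `run_modified.py` training script above wraps both training and prediction in `try/except mx.MXNetError` inside `while True`, retrying indefinitely on failure. A hedged sketch of the same idiom with a bounded attempt count, so a persistent error cannot spin forever; `RuntimeError` stands in for the framework-specific exception, which is not available here:

```python
def run_with_retries(fn, max_attempts=3, retryable=(RuntimeError,)):
    for attempt in range(1, max_attempts + 1):
        try:
            return fn()
        except retryable as e:
            print(f"Attempt {attempt}/{max_attempts} failed: {e}; rerunning...")
    raise RuntimeError(f"Gave up after {max_attempts} attempts")

if __name__ == "__main__":
    calls = {"n": 0}

    def flaky_training_step():
        calls["n"] += 1
        if calls["n"] < 3:
            raise RuntimeError("transient device error")
        return "ok"

    print(run_with_retries(flaky_training_step))  # succeeds on the 3rd try
```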
hidden_dimension, latent_dimension, input_channel, n_class, noise_dim\n 'mnist': (256, 32, 1, 10, 32),\n 'mnist-cnn0': (256, 32, 1, 10, 64),\n 'mnist-cnn1': (128, 32, 1, 10, 32),\n 'mnist-cnn2': (64, 32, 1, 10, 32),\n 'mnist-cnn3': (64, 32, 1, 10, 16)\n}\n\n\n\nRUNCONFIGS = {\n\n 'mnist':\n {\n 'ensemble_lr': 3e-4,\n 'ensemble_batch_size': 128,\n 'ensemble_epochs': 50,\n 'num_pretrain_iters': 20,\n 'ensemble_alpha': 1, # teacher loss (server side)\n 'ensemble_beta': 0, # adversarial student loss\n 'ensemble_eta': 1, # diversity loss\n 'unique_labels': 10, # available labels\n 'generative_alpha': 10, # used to regulate user training\n 'generative_beta': 10, # used to regulate user training\n 'weight_decay': 1e-2\n }\n\n}\n\n","repo_name":"ThisPlatypus/FedGen_partenza","sub_path":"utils/model_config.py","file_name":"model_config.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70954038665","text":"# -*- coding: utf-8 -*-\nimport telebot\nimport pyowm\nimport urllib as urllib2\nfrom googletrans import Translator\n\n\ntoken = \" \"\n\nbot = telebot.TeleBot(token)\n\n\ngreetings = [\"Privet\", \"Hello\", \"Zdrastvdui\",\"Salamaleikum\", ]\nhow_are_you = [\"Otlishno\", \"Uzhasno\", \"Horowo\", \"Super\",\"Poidet\",]\nbot = telebot.TeleBot(\"690263322:AAH-3v6gvlCpcMFS0FNYtkoCnnVSV3IsFWjw\")\n\n@bot.message_handler(commands=[\"weather\"])\ndef weather(message):\n city = bot.send_message(message.chat.id, \"V kakom gorode Vam pokazat pogodku?\")\n bot.register_next_step_handler(city, weath)\n\n\n\n@bot.message_handler(commands=[\"start\"])\ndef handle_start(message):\n user_markup =telebot.types.ReplyKeyboardMarkup()\n user_markup.row('/start','/translate','/weather')\n user_markup.row('audio', 'photo',)\n bot.send_message(message.from_user.id,'Hello',reply_markup=user_markup)\n\n@bot.message_handler(commands=[\"translate\"])\ndef handle_start(message):\n user_markup =telebot.types.ReplyKeyboardMarkup()\n user_markup.row('/english','/russian','/start')\n bot.send_message(message.from_user.id,'what language do you want to translate ??. , на какой язык вы хотите перевести ??.. 
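In the `model_config.py` record above, each `GENERATORCONFIGS` entry is a positional tuple whose field order lives only in a comment. A hypothetical refactor sketch that names the slots while staying tuple-compatible with any existing unpacking code:

```python
from typing import NamedTuple

class GeneratorConfig(NamedTuple):
    hidden_dim: int
    latent_dim: int
    input_channel: int
    n_class: int
    noise_dim: int

GENERATORCONFIGS = {
    "mnist": GeneratorConfig(256, 32, 1, 10, 32),
    "mnist-cnn1": GeneratorConfig(128, 32, 1, 10, 32),
}

cfg = GENERATORCONFIGS["mnist"]
assert cfg.hidden_dim == 256 and cfg[0] == 256  # named and positional access agree
```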
',reply_markup=user_markup)\n\n\n\n@bot.message_handler(commands=[\"english\"])\ndef handle_english(message):\n word = bot.send_message(message.chat.id, \"Введите предложение для перевода\")\n bot.register_next_step_handler(word, get_translater)\n\n\ndef get_translater(message):\n translator = Translator()\n translations = translator.translate(message.text, dest='en') # dest - na kakoi yazyk perevesti nado (ru-russkii)\n output = translations.text\n bot.send_message(message.chat.id, output)\n\n@bot.message_handler(commands=[\"russian\"])\ndef handle_english(message):\n wor = bot.send_message(message.chat.id, \"Введите предложение для перевод\")\n bot.register_next_step_handler(wor, get_translate)\n\n\ndef get_translate(message):\n translator = Translator()\n translations = translator.translate(message.text, dest='ru') # dest - na kakoi yazyk perevesti nado (ru-russkii)\n output = translations.text\n bot.send_message(message.chat.id, output)\n\n\n@bot.message_handler(content_types=[\"text\"])\ndef handle_text(message):\n if message.text =='photo':\n url='https://goo.gl/58RCFF'\n urllib2.urlretrieve(url, 'url_image.jpg')\n img=open('url_image.jpg', 'rb')\n bot.send_chat_action(message.from_user.id, 'upload_photo')\n bot.send_photo(message.from_user.id, img)\n img.close()\n elif message.text == 'audio':\n audio = open(\"/Users/admin/Downloads/audio.mp3\", 'rb')\n bot.send_chat_action(message.from_user.id, 'upload_audio')\n bot.send_audio(message.from_user.id, audio)\n audio.close()\n\n\ndef weath(message):\n owm = pyowm.OWM(\"9bada2f6f1939c15ffa6315235371194\", language=\"ru\")\n city = message.text\n weather = owm.weather_at_place(city)\n w = weather.get_weather()\n temperature = w.get_temperature(\"celsius\")[\"temp\"]\n wind = w.get_wind()[\"speed\"]\n hum = w.get_humidity()\n desc = w.get_detailed_status()\n bot.send_message(message.chat.id, u' '.join((city,desc)).encode('utf-8') +\n \" Temperature: %d, Humidity: %d, Wind: %d m/s\" % (temperature, hum, wind))\n\n\n\n\nimport time\nif __name__ == \"__main__\":\n # bot.infinity_polling(True)\n while True:\n try:\n bot.polling(none_stop=True)\n except Exception as e:\n print('Error in main: %s' % e)\n time.sleep(10)\n\n\n\n","repo_name":"Bekzhannn/weatherbot","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37248071715","text":"# a.k.a Friend Circles\n# https://www.youtube.com/watch?v=HHiHno66j40\n\n# Looks like the island question...\n# https://www.youtube.com/watch?v=YbCpAU5g0rg\n\n\n# Also see 200\nclass Solution:\n\n def findCircleNum(self, isConnected: List[List[int]]) -> int:\n provinces = 0\n visited = set()\n stack = []\n # 200 use the queue\n\n for i in range(len(isConnected)):\n if i not in visited:\n stack.append(isConnected[i])\n while stack:\n city = stack.pop()\n for j in range(len(city)):\n if city[j] == 1 and j not in visited:\n stack.append(isConnected[j])\n visited.add(j)\n provinces += 1\n return provinces\n\n\n# Another solution:\n# https://www.youtube.com/watch?v=kbLxd7nnekI\n# DFS\n\n\nclass Solution:\n\n def findCircleNum(self, isConnected: List[List[int]]) -> int:\n provinces = 0\n visit = set()\n\n def dfs(givenNeighConnections):\n for anotherCity, isInConnection in enumerate(\n givenNeighConnections):\n # anotherCity: the index\n # isInConnection: the value 1\n if isInConnection and anotherCity not in visit:\n visit.add(anotherCity)\n dfs(isConnected[anotherCity])\n\n 
for city, neighConnection in enumerate(isConnected):\n if city not in visit:\n provinces += 1\n dfs(neighConnection)\n\n return provinces\n","repo_name":"YiruDing/LeetcodePractice","sub_path":"Neetcode305/Graphs/547_Number_of_Provinces.py","file_name":"547_Number_of_Provinces.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16460819562","text":"###################################\n# File transfer tools #\n###################################\n\nimport re\nimport os\nimport time\nfrom common.connection import Connection\nfrom common import helpers as H\nfrom common import audio as AA\nfrom PIL import Image\nfrom common.imgcvt import convert_To, gfxmodes, PreProcess, dithertype, cropmodes, open_Image, mode_conv, build_File, im_extensions\nfrom io import BytesIO\nfrom common import turbo56k as TT\nfrom common import style as S\nfrom common.bbsdebug import _LOG, bcolors\nfrom crc import Calculator, Configuration\n\n########################################################################\n# Display image dialog\n########################################################################\n# conn: Connection\n# title: Title of the dialog\n# width/height: Original size of the image\n# save: Show save option\n########################################################################\n# Returns:\tBit 1: Graphic mode 0: Hires | 1: Multicolor\n#\t\t\tBit 7: 0: View Image | 1: Save image\n########################################################################\ndef ImageDialog(conn:Connection, title, width=0, height=0, save=False):\n S.RenderDialog(conn, (11 if save and width !=0 else 10), title)\n keys= b'\\r'\n tml = ''\n if width != 0:\n tml += f''' Original size: {width}x{height}
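The Number-of-Provinces record just above gives stack-based and recursive DFS solutions. Union-find over the adjacency matrix is a third common approach; a self-contained sketch with path halving, counting distinct roots at the end:

```python
def find_circle_num(is_connected):
    n = len(is_connected)
    parent = list(range(n))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    for i in range(n):
        for j in range(i + 1, n):
            if is_connected[i][j]:
                parent[find(i)] = find(j)  # merge the two components
    return sum(find(i) == i for i in range(n))

if __name__ == "__main__":
    assert find_circle_num([[1, 1, 0], [1, 1, 0], [0, 0, 1]]) == 2
```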
<BR>\n Select:<BR><BR> * < M > for multicolor conversion<BR>\n < H > for hi-res conversion<BR>'''\n keys += b'HM'\n if save:\n tml += '<BR> < S > to save image<BR>'\n keys += b'S'\n tml += ' < RETURN > to view image<BR>
'\n conn.SendTML(tml)\n out = 1\n while conn.connected:\n k = conn.ReceiveKey(keys)\n if k == b'H' and out == 1:\n conn.SendTML(' *')\n out = 0\n elif k == b'M' and out == 0:\n conn.SendTML(' *')\n out = 1\n elif k == b'S':\n out |= 0x80\n break\n elif k == b'\\r':\n break\n conn.SendTML('')\n return out\n\n######################################################################################################################################################################################################################################\n# Send bitmap image\n######################################################################################################################################################################################################################################\n# conn: Connection to send the image/dialog to\n# filename: file name or image object\n# lines: Number of \"text\" lines to send, from the top\n# display: Display the image after transfer\n# dialog: Show convert options dialog before file transfer\n# gfxmode: Graphic mode\n# preproc: Preprocess image before converting\n######################################################################################################################################################################################################################################\ndef SendBitmap(conn:Connection, filename, dialog = False, save = False, lines = 25, display = True, gfxmode:gfxmodes = None, preproc:PreProcess = None, cropmode:cropmodes = cropmodes.FILL, dither:dithertype = dithertype.BAYER8):\n\n if gfxmode == None:\n gfxmode = conn.encoder.def_gfxmode\n if conn.mode == 'PET64':\n gfxhi = gfxmodes.C64HI\n gfxmulti = gfxmodes.C64MULTI\n elif conn.mode == 'PET264':\n gfxhi = gfxmodes.P4HI\n gfxmulti = gfxmodes.P4MULTI\n\n ftitle = {'.GIF':' GIF Image ', '.PNG':' PNG Image ', '.JPEG':' JPEG Image ', '.JPG':' JPEG Image '}\n fok = 'File transfer successful!
'\n fabort = ' File transfer aborted!
'\n\n # Find image format\n data = [None]*3\n bgcolor = None\n border = None\n\n mode = 1 if gfxmode == gfxmulti else 0\n save &= conn.QueryFeature(TT.FILETR) < 0x80\n pimg = None\n convert = False\n Source = None\n\n if type(filename)==str:\n extension = os.path.splitext(filename)[1].upper()\n if extension not in ['.GIF','.PNG','.JPG','JPEG']:\n pimg = open_Image(filename)\n if pimg == None:\t#Invalid file, exit\n return\n elif pimg[1] not in conn.encoder.gfxmodes:\t#Image from another platform, convert\n convert = True\n Source = pimg[0]\n if preproc == None:\n preproc = PreProcess()\n cropmode: cropmodes.TOP\n if gfxmode == None:\n gfxmode = mode_conv[conn.mode][pimg[1]]\n else:\n gfxmode = pimg[1]\n data = pimg[2]\n text = pimg[4]\n border = pimg[3][0]\n gcolors = pimg[3]\n else:\n text = ftitle[extension]\n convert = True\n else:\n convert = True\n\n if convert:\n conn.SendTML('')\n if Source == None:\n if type(filename)==str:\n Source = Image.open(filename)\n elif type(filename)==bytes:\n Source = Image.open(BytesIO(filename))\n elif isinstance(filename,Image.Image):\n Source = filename\n Source = Source.convert(\"RGB\")\n if dialog:\n mode = ImageDialog(conn,text,Source.size[0],Source.size[1],save)\n if mode < 0:\n return()\n conn.SendTML('')\n else:\n mode = 1 if (gfxmode == gfxmulti) else 0\n gfxmode = gfxmulti if (mode & 0x7f) == 1 else gfxhi\n if preproc == None and conn.mode == 'PET264':\n preproc = PreProcess(1,1.5,1.5)\n cvimg,data,gcolors = convert_To(Source, gfxmode, preproc, cropmode=cropmode,dither=dither)\n Source.close()\n bgcolor = bytes([gcolors[0]])\t#Convert[4].to_bytes(1,'little')\n gcolors = [gcolors[0]]+gcolors # Border color = bgcolor\n #\n border = bgcolor if border == None else bytes([border])\n elif pimg != None and dialog:\n mode = ImageDialog(conn,text,save=save)\n\n\n tchars = 40*lines\n tbytes = 320*lines\n\n if mode & 0x80 == 0:\t# Transfer to memory\n # Sync\n binaryout = b'\\x00'\n # Enter command mode\n binaryout += b'\\xFF'\n # Set the transfer pointer + $10 (bitmap memory)\n binaryout += b'\\x81\\x10'\n # Transfer bitmap block + Byte count (low, high)\n binaryout += b'\\x82'\n binaryout += tbytes.to_bytes(2,'little')\t#Block size\n # Bitmap data\n binaryout += data[0][0:tbytes] #Bitmap\n # Set the transfer pointer + $00 (screen memory)\n binaryout += b'\\x81\\x00'\n # Transfer screen block + Byte count (low, high)\n binaryout += b'\\x82'\n binaryout += tchars.to_bytes(2,'little')\t#Block size\n # Screen Data\n binaryout += data[1][0:tchars] #Screen\n if border == None:\n border = b'\\x00' if bgcolor == None else bgcolor\n border = bytes([border]) if type(border) == int else border\n if (gfxmode == gfxmulti) or (conn.mode == 'PET264' and data[2] != None):\n # Set the transfer pointer + $20 (color memory)\n binaryout += b'\\x81\\x20'\n # Transfer color block + Byte count (low, high)\n binaryout += b'\\x82'\t# Color data\n binaryout += tchars.to_bytes(2,'little')\t#Block size\n binaryout += data[2][0:tchars] #ColorRAM\n if bgcolor == None:\n bgcolor = bytes([gcolors[1]]) if gcolors[1] != None else b'\\x00'\n if display:\n if gfxmode == gfxmulti:\n # Switch to multicolor mode + Page number: 0 (default) + Border color: border + Background color: bgcolor\n binaryout += b'\\x92\\x00'\n binaryout += border\n binaryout += bgcolor\n if conn.mode == 'PET264':\n binaryout += gcolors[4].to_bytes(1,'big')\n else:\n # Switch to hires mode + Page number: 0 (default) + Border color: border\n binaryout += b'\\x91\\x00'\n binaryout += border\n # Exit command mode\n 
binaryout += b'\\xFE'\n if display:\n conn.Sendall(TT.disable_CRSR())\t#Disable cursor blink\n conn.Sendallbin(binaryout)\n return bgcolor\n else:\n savename = os.path.splitext(os.path.basename(filename))[0]\n if conn.mode in ['PET64','PET264']:\n savename = savename.upper().translate({ord(i): None for i in ':#$*?'})\t#Remove CBMDOS reserved characters\n binaryout, savename = build_File(data,gcolors,savename, gfxmode)\n if TransferFile(conn, binaryout, savename):\n conn.SendTML(fok)\n else:\n conn.SendTML(fabort)\n conn.SendTML('')\n return\n\n####################################################################################\n# Sends a file to the client, calls the adequate function according to the filetype\n####################################################################################\n# conn: Connection\n# filename: path to the file to transfer\n# dialog: Show dialog before transfer\n# save: Allow file downloading to disk\n####################################################################################\ndef SendFile(conn:Connection,filename, dialog = False, save = False):\n fok = 'File transfer successful!
'\n fabort = ' File transfer aborted!
'\n if os.path.exists(filename):\n ext = os.path.splitext(filename)[1].upper()\n # Executables\n if ext == '.PRG' and 'PET' in conn.mode:\n if conn.encoder.check_fit(filename):\n if dialog:\n res = FileDialog(conn,os.path.basename(filename), os.path.getsize(filename), 'Commodore Program', save = save)\n else:\n res = 1+(1*save)\n elif save:\n if dialog:\n res = FileDialog(conn,os.path.basename(filename), os.path.getsize(filename), 'Commodore Program','Download to disk', save = False)*2\n else:\n res = save\n else:\n res = 0\n if res == 1:\n SendProgram(conn,filename)\n elif res == 2:\n savename = os.path.splitext(os.path.basename(filename))[0].upper()\n savename = savename.translate({ord(i): None for i in ':#$*?'})\t#Remove CBMDOS reserved characters\n if TransferFile(conn,filename, savename[:16]):\n conn.SendTML(fok)\n else:\n conn.SendTML(fabort)\n conn.SendTML('')\n conn.ReceiveKey()\n return\n # Text files\n elif ext in ['.SEQ','.TXT']:\n if dialog:\n res = FileDialog(conn,os.path.basename(filename), os.path.getsize(filename), 'Sequential/Text File', 'view', save = save)\n else:\n res = 1+(1*save)\n if res == 1:\n title = 'Viewing text file' if ext == '.TXT' else ''\n SendText(conn,filename,title)\n elif res == 2:\n if ext == '.TXT':\n if len(os.path.basename(filename)) > 16:\n fn = os.path.splitext(os.path.basename(filename))\n savename = (fn[0][:16-len(fn[1])]+fn[1]).upper()\n else:\n savename = os.path.basename(filename).upper()\n else:\n savename = os.path.splitext(os.path.basename(filename))[0].upper()\n savename = savename.translate({ord(i): None for i in ':#$*?'})\t#Remove CBMDOS reserved characters\n if TransferFile(conn,filename, savename[:16],True):\n conn.SendTML(fok)\n else:\n conn.SendTML(fabort)\n conn.SendTML('')\n conn.ReceiveKey()\n return\n # Images\n elif ext in ['.JPG','.GIF','.PNG']+im_extensions: #,'.OCP','.KOA','.KLA','.ART','.DD','.DDL']:\n SendBitmap(conn,filename,dialog,save)\n conn.SendTML('')\n elif ext == '.C':\n ...\n elif ext == '.PET':\n ...\n # Audio\n elif ext in ['.MP3','.WAV'] and not save:\n AA.PlayAudio(conn,filename,None,dialog)\n # TML script\n elif ext == '.TML': \n with open(filename,'r') as slide:\n tml = slide.read()\n conn.SendTML(tml)\n #Default -> download to disk\n elif save:\n if dialog:\n res = FileDialog(conn,os.path.basename(filename), os.path.getsize(filename), 'Download file to disk', prompt='save to disk', save = False)\n else:\n res = 1\n if res == 1:\n if len(os.path.basename(filename)) > 16:\n fn = os.path.splitext(os.path.basename(filename))\n savename = (fn[0][:16-len(fn[1])]+fn[1]).upper()\n else:\n savename = os.path.basename(filename).upper()\n savename = savename.translate({ord(i): None for i in ':#$*?'})\t#Remove CBMDOS reserved characters\n if TransferFile(conn,filename,savename[:16]):\n conn.SendTML(fok)\n else:\n conn.SendTML(fabort)\n conn.SendTML('')\n conn.ReceiveKey()\n\n#################################################################################\n# Sends program file into the client memory at the correct address in turbo mode\n#################################################################################\n# conn: Connection to send the file to\n# filename: name+path of the file to be sent\n#################################################################################\ndef SendProgram(conn:Connection,filename):\n # Verify .prg extension\n ext = os.path.splitext(filename)[1].upper()\n if ext == '.PRG' and conn.encoder.check_fit(filename):\n _LOG('Memory transfer, filename: '+filename, id=conn.id,v=3)\n # 
Open file\n archivo=open(filename,\"rb\")\n # Read load address\n binario=archivo.read(2)\n staddr = binario[0]+(binario[1]*256)\n # Sync\n binaryout = b'\\x00'\n # Enter command mode\n binaryout += b'\\xFF'\n\n # Set the transfer pointer + load address (low:high)\n filesize = os.path.getsize(filename) - 2\n endaddr = staddr + filesize\n binaryout += b'\\x80'\n if isinstance(binario[0],str) == False:\n binaryout += binario[0].to_bytes(1,'big')\n binaryout += binario[1].to_bytes(1,'big')\n else:\n binaryout += binario[0]\n binaryout += binario[1]\n # Set the transfer pointer + program size (low:high)\n binaryout += b'\\x82'\n binaryout += filesize.to_bytes(2,'little')\n _LOG('Load Address: '+bcolors.OKGREEN+str(binario[1]*256+binario[0])+bcolors.ENDC, '/ Bytes: '+bcolors.OKGREEN+str(filesize)+bcolors.ENDC,id=conn.id,v=4)\n # Program data\n binaryout += archivo.read(filesize)\n\n # Exit command mode\n binaryout += b'\\xFE'\n # Close file\n archivo.close()\n # Send the data\n conn.Sendallbin(binaryout)\n conn.SendTML( f'Program file transferred to ${staddr:0{4}x}-${endaddr:0{4}x}
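`SendProgram` above reconstructs the .PRG load address as `binario[0]+(binario[1]*256)` and emits block sizes with `to_bytes(2,'little')`. A tiny check that both are the same little-endian conversion; `$0801`, the usual C64 BASIC start, is used here only as an example value:

```python
header = bytes([0x01, 0x08])            # two-byte .PRG header: $0801
staddr = header[0] + header[1] * 256    # the record's arithmetic
assert staddr == int.from_bytes(header, "little") == 0x0801
# Sizes go out the same way in the record: tbytes.to_bytes(2, 'little')
assert (0x0801).to_bytes(2, "little") == header
```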
'\n f'To execute this program, Log off from<BR>'\n f'this BBS, and exit Retroterm with<BR> RUN/STOP.<BR>'\n f'Then use RUN or the correct SYS.<BR>
'\n f'Or Continue your session')\n if conn.ReceiveKey(b'CL') == b'L':\n conn.connected = False\n\n#####################################################################################\n# Transfer a file to be stored in media by the client\n#####################################################################################\n# conn: Connection to send the file to\n# file: name+path of the file to be sent, or bytes\n# savename: if defined, the filename sent to the client (mandatory if file is bytes)\n#####################################################################################\ndef TransferFile(conn:Connection, file, savename = None, seq=False):\n if isinstance(file,str):\n if os.path.exists(file) == False:\n return False\n else:\n with open(file,'rb') as fb:\n data = fb.read()\n else:\n data = file\n if (conn.QueryFeature(TT.FILETR) < 0x80):\n basename = os.path.basename(file).upper()\n conn.Sendall(chr(TT.CMDON)+chr(TT.FILETR))\n if os.path.splitext(basename)[1] == '.SEQ' or seq:\n conn.Sendallbin(b'\\x00')\t# File type: SEQ\n else:\n conn.Sendallbin(b'\\xF0')\t# File type: PRG\n if savename != None:\n basename = savename\n else:\n basename = os.path.splitext(basename)[0]\n time.sleep(0.1)\n conn.Sendall(basename+chr(0))\n repeats = 0\n b_crc = Calculator(Configuration(width=16, polynomial=0x1021, init_value=0xffff, final_xor_value=0, reverse_input=False, reverse_output=False))\n if conn.ReceiveKey(b'\\x81\\x42\\xAA') == b'\\x81':\n for i in range(0,len(data),256):\n block = data[i:i+256]\n repeats = 0\n while repeats < 4:\n conn.Sendallbin(len(block).to_bytes(2,'big')) # Endianess switched around because the terminal stores it back to forth\n conn.Sendallbin(b_crc.checksum(block).to_bytes(2,'big')) # Endianess switched around because the terminal stores it back to forth\n conn.Sendallbin(block)\n rpl = conn.ReceiveKey(b'\\x81\\x42\\xAA')\n if rpl == b'\\x81':\n break # Block OK get next block\n elif rpl == b'\\xAA':\n repeats += 1 # Block error, resend\n _LOG('File download-Block CRC error',id=conn.id,v=3)\n else:\n _LOG('File download canceled',id=conn.id,v=3)\n repeats = 5 # Disk error/User abort\n\n if repeats >= 4:\n conn.Sendallbin(b'\\x00\\x00\\x00\\x00') # Zero length block ends transfer\n break\n else:\n repeats = 5\n conn.Sendallbin(b'\\x00\\x00\\x00\\x00\\x00\\x00') # Make sure terminal exits transfer mode. 
Zero length block ends transfer + NULL name + Sequential file\n if repeats < 4:\n _LOG('TransferFile: Transfer complete',id=conn.id,v=3)\n elif repeats == 4:\n _LOG('TransferFile: Transfer aborted, too many errors',id=conn.id,v=2)\n else:\n _LOG('TransferFile: Client aborted the transfer',id=conn.id,v=2)\n return repeats < 4\n else:\n _LOG(\"TransferFile: Client doesn't suppport File Transfer command\", id = conn.id, v=2)\n return False\n\n##########################################################################################################\n# Generic file dialog\n##########################################################################################################\n# conn: Connection\n# filename: File basename\n# size:\tFile size, 0 to ignore\n# filetype: File type, shown as title, if none, filename is used as title\n# prompt: option prompt text\n# save: Show save option\n##########################################################################################################\n# Returns: \t0: Cancel\n#\t\t\t1: option\n#\t\t\t2: ave option\n##########################################################################################################\ndef FileDialog(conn:Connection,filename:str,size=0,filetype=None,prompt='transfer to memory',save=False):\n S.RenderDialog(conn,5+(size!=0)+(filetype!=None)+save,(filename if filetype == None else filetype))\n tml = ''\n keys = b'_\\r'\n if filetype != None:\n tml += f' File: {H.crop(filename,32)}
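`TransferFile` above frames each 256-byte block with a 16-bit CRC from the `crc` package. The `Configuration` it builds (width 16, polynomial 0x1021, init 0xFFFF, no reflection, no final XOR) is CRC-16/CCITT-FALSE, which can be sanity-checked against that algorithm's well-known check value:

```python
from crc import Calculator, Configuration

calc = Calculator(Configuration(width=16, polynomial=0x1021, init_value=0xFFFF,
                                final_xor_value=0, reverse_input=False,
                                reverse_output=False))
block = b"123456789"
checksum = calc.checksum(block)
assert checksum == 0x29B1  # published check value for CRC-16/CCITT-FALSE

# Framed as in the record: length and CRC big-endian, then the payload
# (the terminal stores the two-byte fields byte-reversed).
framed = len(block).to_bytes(2, "big") + checksum.to_bytes(2, "big") + block
```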
'\n if size > 0:\n tml += f' Size: {size}<BR><BR>'\n else:\n tml += '<BR>'\n if save:\n tml += ' Press <S> to save to disk, or<BR>'\n keys += b'S'\n else:\n tml += ' Press'\n tml += f' <RETURN> to {prompt[:26]}<BR>
<> to cancel'\n conn.SendTML(tml)\n rc = conn.ReceiveKey(keys)\n return keys.index(rc)\n\n########################################################\n# Sends a file directly without processing\n########################################################\n# conn: Connection to send the file to\n# filename: name+path of the file to be sent\n# wait: boolean, wait for RETURN after sending the file\n########################################################\ndef SendRAWFile(conn:Connection,filename, wait=True):\n _LOG('Sending RAW file: ', filename, id=conn.id,v=3)\n\n with open(filename,'rb') as rf:\n binary=rf.read()\n conn.Sendallbin(binary)\n # Wait for the user to press RETURN\n if wait == True:\n conn.ReceiveKey()\n\n\n#############################################################\n# Sends a text or sequential file\n#############################################################\ndef SendText(conn:Connection, filename, title='', lines=25):\n if title != '':\n S.RenderMenuTitle(conn, title)\n l = 22\n conn.Sendall(TT.set_Window(3,24))\n else:\n l = lines\n conn.SendTML('')\n\n if filename.endswith(('.txt','.TXT')):\n #Convert plain text to PETSCII and display with More\n with open(filename,\"r\") as tf:\n ot = tf.read()\n text = H.formatX(ot)\n elif filename.endswith(('.seq','.SEQ')):\n with open(filename,\"rb\") as tf:\n ot = tf.read()\n tf.close()\n text = ot.decode('latin1')\n H.More(conn,text,l)\n\n if lines == 25:\n conn.Sendall(TT.set_Window(0,24))\n return -1\n\n####################################################\n# Send C formatted C64 screens\n#################################################### \ndef SendCPetscii(conn:Connection,filename,pause=0):\n try:\n fi = open(filename,'r')\n except:\n return()\n text = fi.read()\n fi.close\n if text.find('upper') != -1:\n cs = ''\n else:\n cs = ''\n frames = text.split('unsigned char frame')\n for f in frames:\n if f == '':\n continue\n binary = b''\n fr = re.sub('(?:[0-9]{4})*\\[\\]={// border,bg,chars,colors\\n','',f)\n fl = fr.split('\\n')\n scc = fl[0].split(',')\n bo = int(scc[0]).to_bytes(1,'big') #border\n bg = int(scc[1]).to_bytes(1,'big') #background\n binary += b'\\xff\\xb2\\x00\\x90\\x00'+bo+bg+b'\\x81\\x00\\x82\\xe8\\x03'\n i = 0\n for line in fl[1:26]:\n for c in line.split(','):\t#Screen codes\n if c.isnumeric():\n binary += int(c).to_bytes(1,'big')\n i += 1\n binary+= b'\\x81\\x20\\x82\\xe8\\x03'\n i = 0\n for line in fl[26:52]:\n for c in line.split(','):\t#Color RAM\n if c.isnumeric():\n binary += int(c).to_bytes(1,'big')\n i+=1\n binary+= b'\\xfe'\n conn.Sendallbin(binary)\n conn.SendTML(cs)\n if pause > 0:\n time.sleep(pause)\n else:\n conn.ReceiveKey()\n conn.Sendall(TT.enable_CRSR())\n return -1\n\n##############################################\n# Send .PET formatted C64 screens\n##############################################\ndef SendPETPetscii(conn:Connection,filename):\n try:\n f = open(filename,'rb')\n except:\n return -1\n pet = f.read()\n bo = pet[2].to_bytes(1,'big')\n bg = pet[3].to_bytes(1,'big')\n binary = b'\\xff\\xb2\\x00\\x90\\x00'+bo+bg+b'\\x81\\x00\\x82\\xe8\\x03'\n binary += pet[5:1005]\n binary += b'\\x81\\x20\\x82\\xe8\\x03'\n binary += pet[1005:]\n binary += b'\\xfe'\n conn.Sendallbin(binary)\n if pet[4] == 1:\n conn.SendTML('')\n else:\n conn.SendTML('')\n return 0\n\n###########\n# TML tags\n###########\nt_mono = {\t'SENDRAW':(lambda c,file:SendRAWFile(c,file,False),[('c','_C'),('file','')]),\n 'SENDFILE':(lambda 
c,file,dialog,save:SendFile(c,file,dialog,save),[('c','_C'),('file',''),('dialog',False),('save',False)])}\n","repo_name":"retrocomputacion/retrobbs","sub_path":"common/filetools.py","file_name":"filetools.py","file_ext":"py","file_size_in_byte":24862,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"81"} +{"seq_id":"73971988104","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nВыдача сдачи — 2\n================\nИмеется неограниченное количество монет в 1, 2, 5, 10 рублей.\nОпределите, сколькими способами можно выдать сдачу в n рублей.\nНапример, 5 рублей можно выдать четырьмя способами:\n5 = 2 + 2 + 1 = 2 + 1 + 1 + 1 = 1 + 1 + 1 + 1 + 1.\n\nВходные данные:\nПрограмма получает на вход натуральное число n, не превышающее 10e6.\n\nПримечание:\nПравильное решение задачи можно написать, используя всего один цикл while. \n\"\"\"\n\nn = int(input())\ni = i5 = 0\nwhile i5 <= n:\n i1 = n - i5\n i += (i1 // 2 + 1) * (i5 // 10 + 1)\n i5 += 5\nprint(i)\n\n","repo_name":"azorg/py4child","sub_path":"sirius/python-v1.2/06-while/test_6.7+++.py","file_name":"test_6.7+++.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71837044746","text":"from bokeh.models import ColumnDataSource\nfrom bokeh.plotting import figure\nfrom bokeh.models import BooleanFilter, CDSView, DateRangeSlider, CustomJS, BasicTickFormatter, HoverTool\nfrom bokeh.layouts import column\nfrom utilities import *\n\n\ndef death_and_cases_plot(cases_dataframe, death_dataframe, country_name, y_axis_type):\n \"\"\"\n This function takes in cases dataframes, deaths dataframe, country name, and y-axis type as a parameter.\n It creates a line chart with cases and deaths of the country that is passed as a parameter. The plots y-axis type\n will either be linear or log, depending on the parameter. 
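The `t_mono` table that closes the filetools record maps TML tag names to `(callable, default-parameters)` pairs. A stripped-down, runnable sketch of that dispatch pattern; the tag names and stub functions here are illustrative, not the record's real handlers:

```python
def send_raw(conn, file):
    print(f"raw-sending {file} over {conn}")

def send_file(conn, file, dialog, save):
    print(f"sending {file} (dialog={dialog}, save={save}) over {conn}")

TAGS = {
    "SENDRAW": (send_raw, [("conn", "_C"), ("file", "")]),
    "SENDFILE": (send_file, [("conn", "_C"), ("file", ""),
                             ("dialog", False), ("save", False)]),
}

def dispatch(tag, conn, **overrides):
    func, defaults = TAGS[tag]
    # Start from the declared defaults, then apply caller overrides.
    kwargs = {name: overrides.get(name, default) for name, default in defaults}
    kwargs["conn"] = conn
    return func(**kwargs)

dispatch("SENDFILE", "conn0", file="intro.txt", save=True)
```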
Then returns the plot.\n \"\"\"\n # create a figure object with width and height\n death_and_cases_fig = figure(x_axis_type=\"datetime\", y_axis_type=y_axis_type,\n width=1000, height=400, sizing_mode='fixed')\n # creating columnDataSource object, for the dataframes\n cases_source = ColumnDataSource(cases_dataframe)\n death_sources = ColumnDataSource(death_dataframe)\n # not use scientific numbers on Y-axis\n death_and_cases_fig.yaxis.formatter = BasicTickFormatter(use_scientific=False)\n # add a line renderer using the cases_source's two columns with a label, color and line width to the figure object\n death_and_cases_fig.line(x='Date', y=country_name, source=cases_source, color='Blue',\n line_width=2, legend_label=\"Cases\")\n # add another line renderer using the death_source's two columns with a label, color and line width.\n death_and_cases_fig.line(x='Date', y=country_name, source=death_sources, color='Red',\n line_width=2, legend_label=\"Deaths\")\n # name and field pairs for the Hover tool\n tooltips = [('Date', '@Date{%F}'), (country_name, \"$y{int}\")]\n # formatting scheme of date column\n formatters = {'@Date': 'datetime'}\n # create a Hover tool for the figure with the tooltips and specify the formatting scheme\n death_and_cases_fig.add_tools(HoverTool(tooltips=tooltips, formatters=formatters))\n # get rid of the default toolbar\n death_and_cases_fig.toolbar_location = None\n death_and_cases_fig.title.text = 'Covid cases and deaths'\n death_and_cases_fig.title.text_color = \"midnightblue\"\n death_and_cases_fig.title.text_font_size = \"25px\"\n death_and_cases_fig.xaxis.axis_label = 'Date'\n death_and_cases_fig.yaxis.axis_label = 'Confirmed Cases'\n death_and_cases_fig.legend.location = \"top_left\"\n return death_and_cases_fig\n\n\ndef plot_with_slider(dataframe, country_name, y_axis_name):\n \"\"\"\"\n this function takes a dataframe, y-axis name and country as a parameter,\n creates a plot with a slider and returns the plot.\n \"\"\"\n # create a figure object with width and height\n plot = figure(x_axis_type=\"datetime\", width=1000, height=400, sizing_mode='fixed')\n # creating columnDataSource object, for the dataframes\n source = ColumnDataSource(dataframe)\n # initialize the min and max value of the date\n init_value = (dataframe['Date'].min(), dataframe['Date'].max())\n # configuring date range slider with start date end date and value\n date_range_slider = DateRangeSlider(start=init_value[0], end=init_value[1], value=init_value)\n date_filter = BooleanFilter(booleans=[True] * dataframe.shape[0])\n # not use scientific numbers on Y-axis\n plot.yaxis.formatter = BasicTickFormatter(use_scientific=False)\n # whenever a slider value updates. 
The date range sliders, value changes.\n date_range_slider.js_on_change(\"value\", CustomJS(args=dict(f=date_filter, cases_source=source), code=\"\"\"\\\n const [start, end] = cb_obj.value;\n f.booleans = Array.from(cases_source.data['Date']).map(d => (d >= start \n && d <= end));\n // Needed because of https://github.com/bokeh/bokeh/issues/7273\n cases_source.change.emit();\n \"\"\"))\n\n # add a circle renderer using the source's two columns.\n plot.circle(x='Date', y=country_name, source=source, view=CDSView(source=source, filters=[date_filter]),\n color='Pink', line_width=0.5)\n # name and field pairs for the Hover tool\n tooltips = [('Date', '@Date{%F}'), (country_name, \"$y{int}\")]\n # formatting scheme of date column\n formatters = {'@Date': 'datetime'}\n # create a Hover tool for the figure with the tooltips and specify the formatting scheme\n plot.add_tools(HoverTool(tooltips=tooltips, formatters=formatters, mode='vline'))\n plot.title.text_color = \"midnightblue\"\n plot.title.text_font_size = \"25px\"\n plot.toolbar.active_drag = None\n plot.toolbar_location = None\n plot.xaxis.axis_label = 'Date'\n plot.yaxis.axis_label = y_axis_name\n\n return column(plot, date_range_slider)\n\n\ndef compare_countries_cumulative_per_million(cases_dataframe, country1, country2):\n \"\"\"\n This function takes cases dataframe, country1, country2 as a parameter, creates a line plot for the cases of the\n two countries and returns the plot.\n \"\"\"\n # convert the values of the dataframe to per million\n converted_dataframe = convert_data_to_per_million(cases_dataframe)\n # create a figure object with width and height\n compare_countries_plot = figure(x_axis_type=\"datetime\", width=1000, height=400, sizing_mode='fixed')\n # creating columnDataSource object, for the dataframes\n cases_source = ColumnDataSource(converted_dataframe)\n compare_countries_plot.yaxis.formatter = BasicTickFormatter(use_scientific=False)\n # add a circle renderers using the source's two columns.\n compare_countries_plot.line(x='Date', y=country1, source=cases_source, color='Green',\n line_width=2, legend_label=country1)\n compare_countries_plot.line(x='Date', y=country2, source=cases_source, color='purple',\n line_width=2, legend_label=country2)\n compare_countries_plot.xaxis.axis_label = 'Date'\n compare_countries_plot.yaxis.axis_label = 'Cases'\n compare_countries_plot.legend.location = \"top_left\"\n compare_countries_plot.toolbar_location = None\n return compare_countries_plot\n\n\n","repo_name":"ishres19/covid_data_visualization","sub_path":"individual_country.py","file_name":"individual_country.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20249012060","text":"from nltk.corpus import wordnet, stopwords\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\nclass Preprocessor:\n def __init__(self, name):\n \"\"\" Init Preprocessor with its name \"\"\"\n self.name = name\n\n def getSearchContent(self):\n \"\"\" Get the search text user tap \"\"\"\n text = raw_input(\"Search Content: \")\n return text\n\n\n def executePre(self, text):\n \"\"\"\n :param text: A search content the user tap on keyboard\n :return: An array with several synonyms of the key words from search content\n \"\"\"\n # tokenize sentences to words\n word_tokens = word_tokenize(text)\n\n # filter stopwords\n stop_words = set(stopwords.words('english'))\n filterd_sentence = [w for w in word_tokens if not w in stop_words]\n\n # generate 
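The Bokeh record above wires a `DateRangeSlider` to a `BooleanFilter` through `CustomJS`, so filtering happens client-side without a round trip. A minimal self-contained sketch of the same wiring, using the record's pre-3.0 `CDSView(source=..., filters=[...])` API; the column data is made up:

```python
from datetime import datetime
from bokeh.layouts import column
from bokeh.models import (BooleanFilter, CDSView, ColumnDataSource, CustomJS,
                          DateRangeSlider)
from bokeh.plotting import figure

source = ColumnDataSource(dict(
    Date=[datetime(2021, 1, d) for d in (1, 2, 3)],
    Cases=[10, 20, 15],
))
date_filter = BooleanFilter(booleans=[True] * 3)
plot = figure(x_axis_type="datetime", width=400, height=200)
plot.circle(x="Date", y="Cases", source=source,
            view=CDSView(source=source, filters=[date_filter]))
slider = DateRangeSlider(start=datetime(2021, 1, 1), end=datetime(2021, 1, 3),
                         value=(datetime(2021, 1, 1), datetime(2021, 1, 3)))
# Recompute the booleans in the browser whenever the slider moves.
slider.js_on_change("value", CustomJS(args=dict(f=date_filter, src=source), code="""
    const [start, end] = cb_obj.value;
    f.booleans = Array.from(src.data['Date']).map(d => d >= start && d <= end);
    src.change.emit();
"""))
layout = column(plot, slider)  # pass to show(layout) to render
```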
synonyms words\n synonyms = []\n for filter_word in filterd_sentence:\n synonyms.append(filter_word)\n for syn in wordnet.synsets(filter_word):\n for l in syn.lemmas():\n synonyms.append(l.name())\n return synonyms","repo_name":"wyfunique/Cloud-Computing-and-Storage-2018-Fall","sub_path":"preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19666653964","text":"import socket\n\n# Detalhe -> IP e porta do servidor são sempre fixas\n\n# Qualquer endereçco IP\nhost = \"\"\n\nport = 12321\n# 0 ... 65535\n# Portas Publicas -> 0 ... 1024\n# HTTP -> 80\n# DNS -> 53\n\n\n# IPv4 e TCP\n# Atribuindo servidor a variavel s\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\n # Vinclunado no servidor endereço e porta que irão receber conexões\n s.bind((host,port))\n\n # Quantas conexões pendentes podem ficar em fila\n s.listen(1)\n\n print(\"Aguardando Conexões...\")\n conn, addr = s.accept()\n # conn = objeto que representa a conexão entre o cliente e o servidor\n # addr = objeto com informações sobre a conexão\n\n with conn:\n\n print(f\"Conexão Estabelecida com {addr}\")\n\n while True:\n\n # Recebe os dados do cliente -> Array de até 1024 bytes\n data = conn.recv(1024)\n\n msg = data.decode('utf8')\n\n if not data: break\n #if msg == \"EOF\\r\\n: break\n\n print(f\"Recebido: {msg}\")\n\n # Retorna os dados para o cliente\n conn.sendall(data)\n\n","repo_name":"VictorTmelo/Python","sub_path":"Redes Convergentes - Cliente Servidor/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16710661382","text":"import time\nfrom flask import Flask, jsonify\nfrom multiprocessing import Process, Value\n\napp = Flask(__name__)\n\n@app.route('/names', methods=['POST'])\ndef get_tasks():\n\treturn jsonify({'tasks': tasks})\n\n\ndef record_loop(loop_on):\n\twhile True:\n\t\tif loop_on.value == True:\n\t\t\tprint(\"loop running\")\n\t\ttime.sleep(1)\n\n\nif __name__ == \"__main__\":\n\trecording_on = Value('b', True)\n\tp = Process(target=record_loop, args=(recording_on,))\n\tp.start()\n\tapp.run(debug=True, use_reloader=False)\n\tp.join()\n","repo_name":"Jaycar-Electronics/Facial-Recognition-Door-Lock","sub_path":"misc/other-testing-scripts/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"73831251784","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 19 22:07:40 2019\r\n\r\n@author: USER\r\n\"\"\"\r\n\r\n# HEART dISEASE DIAGONOSIS\r\n\r\n# Using ANN classifier\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# Data Preprocessing \r\ndataset = pd.read_table('processed.cleveland.data', sep = ',', header = None)\r\n\r\nX = dataset.iloc[:,:-1]\r\n\r\n\r\ny_class = dataset.iloc[:,-1]\r\n\r\ny = [item>0 for item in y_class]\r\n\r\n# Replacing missing values with most frequent one\r\nX[11].value_counts()\r\nX[11] = X[11].map({'?':0, '1.0' : 1.0, '2.0': 2.0, '3.0' : 3.0, '0.0' : 0.0})\r\nX[12] = X[12].map({'6.0':6.0, '3.0': 3.0, '7.0' : 7.0, '?':3.0})\r\n\r\nX = X.values\r\n\r\n # Handling categorical Variables\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\n\r\noneHotEncoder = 
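The `Preprocessor` record above expands query keywords with WordNet synonyms, but it calls Python 2's `raw_input` and assumes the NLTK corpora are already installed. A sketch of the same expansion for Python 3, with the one-time downloads made explicit:

```python
import nltk
from nltk.corpus import stopwords, wordnet
from nltk.tokenize import word_tokenize

# No-ops if the corpora are already present; very recent NLTK releases may
# also need the "punkt_tab" package for word_tokenize.
for pkg in ("punkt", "stopwords", "wordnet"):
    nltk.download(pkg, quiet=True)

def expand_query(text):
    stop_words = set(stopwords.words("english"))
    keywords = [w for w in word_tokenize(text) if w.lower() not in stop_words]
    synonyms = set(keywords)
    for word in keywords:
        for syn in wordnet.synsets(word):
            synonyms.update(lemma.name() for lemma in syn.lemmas())
    return sorted(synonyms)

print(expand_query("fast car"))  # includes e.g. 'auto', 'automobile', ...
```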
OneHotEncoder(categorical_features = [2])\r\nX = oneHotEncoder.fit_transform(X).toarray()\r\nX = X[:,1:]\r\n \r\noneHotEncoder = OneHotEncoder(categorical_features = [8])\r\nX = oneHotEncoder.fit_transform(X).toarray()\r\nX = X[:,1:]\r\n\r\noneHotEncoder = OneHotEncoder(categorical_features = [13])\r\nX = oneHotEncoder.fit_transform(X).toarray()\r\nX = X[:,1:]\r\n\r\noneHotEncoder = OneHotEncoder(categorical_features = [16])\r\nX = oneHotEncoder.fit_transform(X).toarray()\r\nX = X[:,1:]\r\n\r\n\r\n# Train_test Split\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)\r\n\r\n# Feature Scaling for categorical variable\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc_x = StandardScaler()\r\nX_train = sc_x.fit_transform(X_train)\r\nX_test = sc_x.transform(X_test)\r\n\r\n\r\n\r\n\r\n\r\n# Building an ANN mdoel for classification\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\nclassifier = Sequential()\r\n\r\n# Adding input and first hidden layer\r\nclassifier.add(Dense(units = 10, activation = 'relu', kernel_initializer = 'uniform', input_dim = 18))\r\n\r\n# second Hidden layer\r\nclassifier.add(Dense(units = 10, activation = 'relu', kernel_initializer = 'uniform'))\r\n\r\nclassifier.add(Dense(units = 10, activation = 'relu', kernel_initializer = 'uniform'))\r\n\r\n# output Layer\r\nclassifier.add(Dense(units = 1, activation = 'sigmoid'))\r\n\r\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\r\n\r\n\r\nimport time\r\nstart = time.time()\r\n\r\nclassifier.fit(X_train, y_train, batch_size = 5, epochs = 70)\r\n\r\nelapsed = time.time()-start\r\nprint (elapsed)\r\n\r\n\r\n# Make predictions\r\ny_pred = classifier.predict(X_test)\r\n\r\ny_pred = y_pred > 0.5\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import cross_val_score\r\naccuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)\r\n\r\nprint (accuracies.mean())\r\nprint (metrics.confusion_matrix(y_test, y_pred))\r\nprint (metrics.accuracy_score(y_test, y_pred))\r\n\r\nprint (metrics.f1_score(y_test, y_pred))\r\n\r\n\r\n\r\n","repo_name":"Harshad1994/ML_projects","sub_path":"ann_for_classification.py","file_name":"ann_for_classification.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20251301250","text":"from . import Field\nfrom . 
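The heart-disease record above repeatedly calls `OneHotEncoder(categorical_features=[...])`, an argument scikit-learn removed in version 0.22. The modern equivalent selects columns with a `ColumnTransformer`; a sketch that encodes column 2 the way the record's first call does, on illustrative sample rows:

```python
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

X = np.array([[63.0, 1.0, 1.0, 145.0],
              [67.0, 1.0, 4.0, 160.0],
              [37.0, 1.0, 3.0, 130.0]])
ct = ColumnTransformer(
    transformers=[("cp", OneHotEncoder(drop="first"), [2])],  # encode column 2
    remainder="passthrough",  # keep the other columns as-is
)
X_enc = ct.fit_transform(X)
# 3 categories in column 2, first dropped -> 2 dummies, plus 3 passthrough cols
print(X_enc.shape)  # (3, 5)
```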
import Schema\nfrom .ast import *\nfrom .utils import * \nfrom .utils.exceptions import SQLSyntaxError\nfrom .query_parser_toolbox import *\nfrom .extensions.extended_syntax.registry import getRegistry\nfrom .extensions.extended_syntax.registry_utils import *\n\n# Parses a SQL query string and returns an AST\ndef parse(statement, root_exp = None):\n term = set(terminators)\n\n if root_exp is None:\n root_exp = or_exp \n\n tokens = [ \n token.lower() if token.lower() in term else token\n for token in ExtensibleTokens(statement) \n ]\n\n extended_syntax_registry: 'OrderedDict[Name, RegEntry]' = getRegistry()\n clauses_to_parsers: \\\n 'OrderedDict[SQLClause, OrderedDict[Type[ExtendedSyntax], typing.Tuple[TriggerFunc, ParserFunc]]]' =\\\n RegistryUtils.groupRegistryByClause(extended_syntax_registry)\n exp = root_exp(tokens, clauses_to_parsers)\n if tokens: \n raise SyntaxError('Incomplete statement {}'.format(tokens))\n return exp\n\ndef parse_statement(statement):\n return parse(statement, root_exp=union_stmt)\n\ndef parse_select(relation, statement):\n columns = parse(\n statement, \n root_exp=lambda tokens: select_core_exp(tokens)\n )\n return projection_op(relation, columns)\n\ndef parse_order_by(statement):\n return parse(\n statement, \n root_exp=order_by_core_expr\n )\n\ndef parse_group_by(statement):\n return parse(\n statement, \n root_exp=group_by_core_expr\n )\n","repo_name":"wyfunique/DBSim","sub_path":"dbsim/query_parser.py","file_name":"query_parser.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"26270259644","text":"import requests\nimport json\n\n\n# -------------------------------------------------\n# Input: Json\n# Output: text\n# -------------------------------------------------\nresponse=requests.post(\n 'http://0.0.0.0:5000/prediction',\n json={\"text\":\"KETI is a specialized production technology research institute established in 1988 to promote Korea's electronics and information communication industry. KETI conducts technology research and development, industry support, international standardization, and technology talent cultivation in the field of information and communication to serve as a central player in implementing and advancing the nation's ICT policies. 
Its main research areas include artificial intelligence, the Internet of Things (IoT), big data, cloud computing, and more.\"}\n).text\nprint(response)\n\n\n# -------------------------------------------------\n# Input: Image + Json\n# Output: Json\n# -------------------------------------------------\ndata = {'filename':\"test.jpg\", 'req_task':\"object detection\"}\nfilename=\"test.jpg\"\nfiles = [\n ('image', (filename, open(filename, 'rb'), 'application/octet')),\n ('data', ('data', json.dumps(data), 'application/json')),\n]\nresponse=requests.post(\n 'http://0.0.0.0:5000/object_detection',\n files=files\n).json()\nprint(response)\n\n\n# -------------------------------------------------\n# Input: Image + Json\n# Output: Image\n# -------------------------------------------------\nfrom PIL import Image\nimport io\ndata = {'filename':\"test.jpg\", 'req_task':\"depth estimation\"}\nfilename=\"test.jpg\"\nfiles = [\n ('image', (filename, open(filename, 'rb'), 'application/octet')),\n ('data', ('data', json.dumps(data), 'application/json')),\n]\nresponse=requests.post(\n 'http://0.0.0.0:5000/depth_estimation',\n files=files\n)\nimage = Image.open(io.BytesIO(response.content))\nimage.save(\"result_depth_estimation.png\")\n\n\n# -------------------------------------------------\n# Input: Image + Json\n# Output: Json(Base64(Image bytes))\n# -------------------------------------------------\nimport base64\ndata = {'filename':\"test.jpg\", 'req_task':\"depth estimation json\"}\nfilename=\"test.jpg\"\nfiles = [\n ('image', (filename, open(filename, 'rb'), 'application/octet')),\n ('data', ('data', json.dumps(data), 'application/json')),\n]\nresponse=requests.post(\n 'http://0.0.0.0:5000/depth_estimation_json',\n files=files\n)\ndata = response.json()[\"data\"]\ndecoded_image = base64.decodebytes(data.encode(\"utf-8\"))\nimage = Image.open(io.BytesIO(decoded_image))\nimage.save(\"result_depth_estimation_json.png\")\n\n\n# -------------------------------------------------\n# Input: Json\n# Output: Audio\n# -------------------------------------------------\nresponse=requests.post(\n 'http://0.0.0.0:5000/text2speech',\n json={\"text\":\"In a new interview, former President Donald Trump refused or avoided answering many specific questions about his conduct on Jan. 
6\"}\n)\nopen(\"test.wav\", \"wb\").write(response.content)\n\n\n# -------------------------------------------------\n# Input: Audio + Json\n# Output: Json\n# -------------------------------------------------\ndata = {'filename':\"test.wav\", 'req_task':\"speech to text\"}\nfilename=\"test.wav\"\nfiles = [\n ('audio', (filename, open(filename, 'rb'), 'application/octet')),\n ('data', ('data', json.dumps(data), 'application/json')),\n]\nresponse=requests.post(\n 'http://0.0.0.0:5000/speech2text',\n files=files\n).json()\nprint(response)","repo_name":"MMC-K/Flask-api-template","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38105138090","text":"def fatorial(d,x=False):\n \"\"\"\n Calcula o fatorial de um número\n :param d: o número escolhido\n :param x: (opcional) se você quer ver a conta\n :return: o fatorial do número escolhido.\n \"\"\"\n d1=1\n for c1 in range(1,d+1):\n d1*=c1\n if x==True:\n for c1 in range(d,1,-1):\n print(c1, end=' x ')\n print(f'1 = {d1}')\n else:\n print(d1)\n\nfatorial(5, True)\nhelp(fatorial)","repo_name":"ToledoLBC/Aulas","sub_path":"Python/Exercícios Python/aulas/3.21 - 102.py","file_name":"3.21 - 102.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27135385252","text":"\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param head: The head of linked list.\n @param val: An integer.\n @return: The head of new linked list.\n \"\"\"\n def insertNode(self, head, val):\n # write your code here\n # 插入:可以插入头, 插入尾, 插入中间\n # 要被插入的 linked list 可以是空, 只有一个 node, 有很多个 node\n node = ListNode(val)\n if head is None:\n # 头是空就是空的 linked list\n # 直接传回 node 就好\n return node\n\n # 头不是空的时候,要插入的地方就是插入头, 插入尾, 或插入中间\n if head.val >= val:\n # 插入头: 有可能头比较大也可能和头相等\n # 这时候都插到头的前面\n node.next = head\n return node\n\n curr = head\n while curr is not None:\n if curr.val < val:\n if curr.next is None:\n # 插入尾\n curr.next = node\n elif curr.next.val >= val:\n # 插入中间\n # 有可能很多个 nodes 和 val 相同\n # 插入在相同的第一个前面,这样就不用管相同的有几个\n node.next = curr.next\n curr.next = node\n curr = curr.next\n\n return head\n","repo_name":"ytatus94/Leetcode","sub_path":"lintcode/lintcode_0219_Insert_Node_in_Sorted_Linked_List.py","file_name":"lintcode_0219_Insert_Node_in_Sorted_Linked_List.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19728524139","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self):\n self.head = None \n\n def InsertNode(self, val):\n if self.head == None:\n self.head = val\n else:\n temp = self.head\n while temp.next != None:\n temp = temp.next\n temp.next = val\n \n def SortAndMerge(self):\n ans = []\n temp = self.head\n while temp != None:\n ans.append(temp.data)\n temp = temp.next\n #1->2->3->4->5\n ans.sort()\n temp = self.head\n i = 0\n while i < len(ans):\n temp.data = ans[i]\n i += 1\n temp = temp.next\n\n\n def ViewNode(self):\n temp = self.head\n while temp!= None:\n print(temp.data)\n temp = temp.next\n\nlists = LinkedList()\n# arr = [3,5,2,4,1]\narr = [9,15,0]\nfor i in arr:\n node = Node(i)\n 
lists.InsertNode(node)\n\nlists.SortAndMerge()\nlists.ViewNode()\n\n\narr = [5,3,4,5,4,3,2,9]\nres = 0\nfor i in arr:\n res = res ^ i\nprint(res) ","repo_name":"aniket0951/LeetcodePractise","sub_path":"GeeksLinkedList/MergeAndSort.py","file_name":"MergeAndSort.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3966539074","text":"\"\"\"\nWritten by Emily Pease\n\nScript to open a layer (shapefile) and determine how many features are present. This is in the\ncontext of watershed delineation. It creates three columns called Site_ID, Site_Name,\nand Area_sq_mi in addition to deleting any extra rows present in the geodataframe\n(attribute table) that the analyst would not want included.\n\nOften times, during watershed delineation, ESRI will create stand-alone raster cells as\npart of the watershed. This script deletes all of those. \n\n\"\"\"\n\n\nimport geopandas as gpd\nimport os\n\nws = gpd.GeoDataFrame.from_file(os.path.join('real.gdb'), layer='ws_poly2')\nprint(str('Number of extra rows in ws.shp is '), ws.intersects(ws).count() -1) # Extra rows, not TOTAL rows\n\nws['Site_ID'] = ''\nws['Site_Name'] = ''\nws['Area_sq_mi'] = ''\n\nif len(ws) > 1:\n ws = ws.loc[ws['Shape_Area'] == max(ws['Shape_Area'])]\n print(ws)\n print(ws.touches(ws))\n ws.to_file('ws_delrows.shp')\nelif len(ws) == 1:\n print(\"You're good to go!\")\n print(\"Thanks :)\")\n","repo_name":"ecpease/GeoScripts","sub_path":"ws_poly_processing.py","file_name":"ws_poly_processing.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2126186246","text":"import boto3\r\n\r\nfrom boto3.dynamodb.conditions import Key\r\n\r\nPKEY_NAME = \"pk\"\r\n# GENERAL_TABLE = \"lost-ark-guild-bot\"\r\n\r\ndynamodb_client = boto3.resource(\"dynamodb\")\r\n\r\n\r\ndef query_index(\r\n table_name: str,\r\n index: str,\r\n key_condition: dict,\r\n filterExpression: str = \"\",\r\n expressionAttributeValues: dict = {},\r\n):\r\n table = dynamodb_client.Table(table_name)\r\n if filterExpression:\r\n response = table.query(\r\n IndexName=index,\r\n KeyConditionExpression=Key(list(key_condition.keys())[0]).eq(\r\n list(key_condition.values())[0]\r\n ),\r\n FilterExpression=filterExpression,\r\n ExpressionAttributeValues=expressionAttributeValues,\r\n )\r\n else:\r\n response = table.query(\r\n IndexName=index,\r\n KeyConditionExpression=Key(list(key_condition.keys())[0]).eq(\r\n list(key_condition.values())[0]\r\n ),\r\n )\r\n return response[\"Items\"]\r\n\r\n\r\ndef get_rows(\r\n table_name: str,\r\n pkey_value: str = None,\r\n filterExpression: str = \"\",\r\n expressionAttributeValues: dict = {},\r\n):\r\n table = dynamodb_client.Table(table_name)\r\n\r\n if pkey_value:\r\n if filterExpression:\r\n response = table.query(\r\n KeyConditionExpression=Key(PKEY_NAME).eq(pkey_value),\r\n FilterExpression=filterExpression,\r\n ExpressionAttributeValues=expressionAttributeValues,\r\n )\r\n return response[\"Items\"]\r\n else:\r\n response = table.get_item(Key={PKEY_NAME: pkey_value})\r\n if response and \"Item\" in response:\r\n return [response[\"Item\"]]\r\n else:\r\n return []\r\n\r\n # otherwise, scan\r\n if filterExpression:\r\n response = table.scan(\r\n FilterExpression=filterExpression,\r\n ExpressionAttributeValues=expressionAttributeValues,\r\n )\r\n return response[\"Items\"]\r\n\r\n return table.scan()[\"Items\"]\r\n\r\n\r\ndef 
set_rows(table_name: str, pkey_value: str, new_column: dict):\r\n table = dynamodb_client.Table(table_name)\r\n existing_rows = get_rows(table_name, pkey_value)\r\n if not existing_rows:\r\n new_column[PKEY_NAME] = pkey_value\r\n table.put_item(Item=new_column)\r\n else:\r\n for k, v in new_column.items():\r\n for _ in existing_rows:\r\n table.update_item(\r\n Key={PKEY_NAME: pkey_value},\r\n UpdateExpression=f\"set {k}=:s\",\r\n ExpressionAttributeValues={\":s\": v},\r\n )\r\n\r\n\r\ndef increment_counter(table_name: str, pkey_value: str, column_name: str):\r\n table = dynamodb_client.Table(table_name)\r\n existing_rows = get_rows(table_name, pkey_value)\r\n if not existing_rows:\r\n new_column = {PKEY_NAME: pkey_value, column_name: 1}\r\n table.put_item(Item=new_column)\r\n else:\r\n table.update_item(\r\n Key={PKEY_NAME: pkey_value},\r\n UpdateExpression=f\"ADD {column_name} :inc\",\r\n ExpressionAttributeValues={\":inc\": 1},\r\n )\r\n\r\n\r\ndef decrement_counter(\r\n table_name: str,\r\n pkey_value: str,\r\n column_name: str,\r\n default_start: int = 0,\r\n decrement: int = 1,\r\n):\r\n table = dynamodb_client.Table(table_name)\r\n existing_rows = get_rows(table_name, pkey_value)\r\n if not existing_rows:\r\n new_column = {PKEY_NAME: pkey_value, column_name: default_start}\r\n table.put_item(Item=new_column)\r\n else:\r\n table.update_item(\r\n Key={PKEY_NAME: pkey_value},\r\n UpdateExpression=f\"ADD {column_name} :inc\",\r\n ExpressionAttributeValues={\":inc\": -1 * decrement},\r\n )\r\n\r\n\r\ndef delete_item(table_name: str, pkey_value: str):\r\n table = dynamodb_client.Table(table_name)\r\n table.delete_item(Key={PKEY_NAME: pkey_value})\r\n","repo_name":"oozio/lost-ark-guild-bot","sub_path":"utils/dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"3711295736","text":"import os\nfrom flask_wtf import FlaskForm\nfrom werkzeug.utils import secure_filename\nfrom wtforms import FileField, SubmitField\nfrom wtforms.validators import InputRequired\nfrom flask import Flask, render_template, send_from_directory\n\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '12345678'\napp.config['UPLOAD_FOLDER'] = 'static/files'\nfolder = 'static/files'\n\nclass UploadfileForm(FlaskForm):\n file = FileField('File', validators=[InputRequired()])\n submit = SubmitField('Upload')\n\n\n@app.route('/', methods=['GET','POST'])\n\n\ndef home():\n form = UploadfileForm()\n if form.validate_on_submit():\n file = form.file.data\n file.save(os.path.join(os.path.abspath(os.path.dirname(__file__)),app.config['UPLOAD_FOLDER'],secure_filename(file.filename)))\n return 'File has been uploaded - back'\n return render_template('index.html', form=form)\n\n\ndef list_dir(dir):\n file_names = os.listdir(dir)\n\n for file_name in file_names:\n print(file_name)\n\n\nlist_dir(folder)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"ryu878/flask_upload","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4435731571","text":"import sys\r\ninput=sys.stdin.readline\r\nfrom collections import deque\r\n\r\nn, m=map(int, input().split())\r\ndata=[] \r\nfor i in range(n):\r\n data.append(list(map(int, input().split())))\r\n\r\ndx=[-1,-1,-1,0,0,1,1,1] \r\ndy=[-1,0,1,1,-1,-1,0,1]\r\n\r\ncheckidx=[]\r\n\r\ndef bfs(x, y, 
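The DynamoDB helpers above fall back to `put_item` when a counter row does not exist yet, but DynamoDB's `ADD` action already treats a missing numeric attribute as 0, so a single `update_item` is both atomic and sufficient, and concurrent writers cannot race. A sketch; the table and key names are hypothetical:

```python
import boto3

def increment(table_name, pkey_value, column, by=1):
    table = boto3.resource("dynamodb").Table(table_name)
    table.update_item(
        Key={"pk": pkey_value},
        UpdateExpression=f"ADD {column} :inc",  # creates the attribute at 0 if absent
        ExpressionAttributeValues={":inc": by},
    )

# increment("lost-ark-guild-bot", "raid#argos", "signups")
```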
checkidx):\r\n q=deque([(x,y)])\r\n visited[x][y]=1\r\n check=[(x, y)]\r\n while q:\r\n X, Y= q.popleft()\r\n for i in range(8):\r\n nx, ny= X+dx[i], Y+dy[i] \r\n if nx<0 or nx>=n or ny<0 or ny>=m:\r\n continue\r\n if visited[nx][ny]==1:\r\n continue\r\n if data[X][Y] None:\n super().__init__()\n\n self.region = region\n self.source = source\n self.start_date = start_date\n self.end_date = end_date\n self.profession_ids = profession_ids\n\n def dict(self):\n return {\n 'region': self.region.id,\n 'source': self.source.id,\n 'start_date': datetime.strftime(self.start_date, \"%Y-%m-%d\"),\n 'end_date': datetime.strftime(self.end_date, \"%Y-%m-%d\"),\n 'profession_ids': self.profession_ids\n }\n\n\nclass SelectedItems:\n def __init__(self,\n profession_id,\n general_fun_ids,\n fun_ids,\n part_ids) -> None:\n super().__init__()\n\n self.profession_id = profession_id\n self.general_fun_ids = general_fun_ids\n self.fun_ids = fun_ids\n self.part_ids = part_ids\n\n def dict(self):\n return {\n 'profession_id': self.profession_id,\n 'general_fun_ids': self.general_fun_ids,\n 'fun_ids': self.fun_ids,\n 'part_ids': self.part_ids\n }\n\n\nclass Selected:\n def __init__(self) -> None:\n super().__init__()\n self.items = {}\n","repo_name":"master8/vacancies_analyzer","sub_path":"dto.py","file_name":"dto.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18490467607","text":"import matplotlib.pyplot as plt \nimport numpy as np \nfrom statsmodels.tsa.stattools import acf, pacf, ccf\nfrom statsmodels.tsa.arima_process import arma2ma, arma2ar\nimport pandas as pd\n\n\ndef arma1(p, q, phi, theta, N):\n ######### SAMPLE ACF ########\n x = np.zeros(N)\n white = np.random.normal(0,1,N)\n if p == 0:\n phi = 0\n if q == 0:\n theta = 0\n for i in range(1, N):\n x[i] = phi*x[i-1] + theta*white[i-1] + white[i]\n \n\n ###### THEORETICAL ACF ARMA(1,1) #######\n h = np.arange(1,10,1)\n top = (1 + theta*phi)*(phi + theta)\n bot = (1 + 2*phi*theta + theta**2)\n rho = (top/bot)*phi**(h-1)\n\n\n # Plot\n fig, ax = plt.subplots(1,3)\n ax[0].plot(x)\n ax[0].set_title('ARMA(%s,%s), $\\\\phi = %s,\\\\theta = %s$'%(p ,q, phi, theta))\n ax[1].stem(acf(x, nlags = 10))\n ax[1].set_title('SAMPLE ACF')\n ax[2].stem(rho)\n ax[2].set_title('THEORETICAL ACF')\n plt.tight_layout()\n plt.show()\n\n\ndef ar1(phi, N):\n x = np.zeros(N)\n white = np.random.normal(0,1,N)\n for i in range(1, N):\n x[i] = phi*x[i-1] + white[i]\n return x\n\n\ndef acf_plot(x, lag):\n plt.stem(acf(x, nlags=lag))\n plt.show()\n\ndef pacf_plot(x, lag):\n plt.stem(pacf(x, nlags=lag))\n plt.show()\n\n\ncmort = np.genfromtxt('cmort.txt', delimiter= '\\t')[1:,1]\nrec = np.genfromtxt('rec.txt', delimiter= '\\t')[1:,1]\ncmort = pd.DataFrame(np.diff(np.log(cmort), 1))\nrec = pd.DataFrame(np.diff(np.log(rec), 1))\n\n\n\n\n\n","repo_name":"MartinRovang/UniversityPhysics","sub_path":"Tidsrekker/Øvelser/Øvelse 10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"8455691781","text":"import unittest\nfrom po_calc.page.page_calc import PageCalc\nfrom po_calc.base.get_driver import GetDriver\nfrom parameterized import parameterized\nfrom po_calc.tool.get_log import get_logging\nfrom po_calc.tool.read_json import read_json\n\nlog = get_logging()\n\ndef get_data():\n arrs = []\n datas = read_json('calc.json')\n for data in datas.values():\n 
arrs.append((data.get('a'), data.get('b'), data.get('expect')))\n return arrs\n\nclass TestCalc(unittest.TestCase):\n driver = None\n @classmethod\n def setUpClass(cls) -> None:\n cls.driver = GetDriver().get_driver()\n cls.calc = PageCalc(cls.driver)\n\n @classmethod\n def tearDownClass(cls) -> None:\n GetDriver.quit_driver()\n\n @parameterized.expand(get_data())\n def test_calc(self,a,b,expect):\n self.calc.page_calc(a,b)\n try:\n #从json传过来的预期结果需要转成字符串类型再判断是否相等\n self.assertEqual(self.calc.page_get_res(),str(expect))\n except Exception as e:\n self.calc.page_get_img()\n log.error(e)\n\n","repo_name":"lyuyis/test","sub_path":"test/test_calc.py","file_name":"test_calc.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27530578448","text":"from collections import deque\n\nn = int(input())\na,b = map(int,input().split())\nm = int(input())\ngraph = [[] for _ in range(n+1)]\nvisited = [0]*(n+1)\nfor _ in range(m):\n x,y = map(int,input().split())\n graph[x].append(y)\n graph[y].append(x)\n\ndef dfs(graph,v,visited):\n for i in graph[v]:\n if not visited[i]:\n visited[i] = visited[v]+1\n dfs(graph,i,visited)\n\ndef bfs(graph,v,visited):\n q = deque()\n q.append(v)\n while q:\n p = q.popleft()\n # print(p)\n for i in graph[p]:\n if not visited[i]:\n visited[i] = visited[p]+1\n q.append(i)\nbfs(graph,a,visited)\n# dfs(graph,a,visited)\nif visited[b]>0:\n print(visited[b])\nelse:\n print(-1)\n","repo_name":"sshee0123/Baekjoon","sub_path":"2644..py","file_name":"2644..py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38988235324","text":"\"\"\"Module to store GenreFinder class.\"\"\"\nimport csv\nimport json\n\nfrom genre_parser.constants import *\nfrom genre_parser.objects import Book, Genre, Keyword\nfrom urllib import request\nfrom genre_parser.constants import BOOKS_ENDPOINT_PAGED\n\n\nclass GenreFinder:\n \"\"\"Driving class to find genre of the books from description.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Construct new GenreFinder object.\"\"\"\n self.keywords = dict()\n self.books = list()\n\n def find_genre(self) -> None:\n \"\"\"Find top three genres for each book.\"\"\"\n # iterate through each book\n for book_index, book in enumerate(self.books):\n # print(book.name)\n for keyword in self.keywords.values():\n # find number of times the keyword is present in the description\n keyword_count = book.description.count(keyword.name)\n if keyword_count != 0:\n for genre in keyword.genres:\n # if genre is already added to book, use it\n # create new Genre object otherwise\n self.books[book_index].genres[genre] = self.books[\n book_index\n ].genres.get(genre, Genre(genre))\n # update average of the genre\n self.books[book_index].genres[genre].update_average(keyword, keyword_count)\n\n # for genre in book.get_top_genres():\n # print(f\"{genre.name}, {int(genre.score)}\")\n\n def top_genre_across(self) -> None:\n \"\"\"\n Print the top genre across all books\n\n :return: None\n \"\"\"\n genres = {}\n for book in self.books:\n for genre in book.genres.values():\n cur_value = genres.get(genre.name)\n if cur_value is None:\n cur_value = 0\n genres[genre.name] = cur_value + genre.get_score(genre)\n\n max_genre = None\n max_genre_score = -1e10\n for genre in genres.keys():\n print(f\"Genre: {genre} has the score: {genres[genre]}\")\n if genres[genre] > max_genre_score:\n max_genre = 
genre\n max_genre_score = genres[genre]\n\n print(f\"The {max_genre} has highest score of {max_genre_score}\")\n\n def get_keywords(self, path_to_csv: str) -> None:\n \"\"\"\n Populate keywords from provided CSV file.\n\n :param path_to_csv: path to the CSV file\n :return: None\n \"\"\"\n with open(path_to_csv, 'r') as csvFile:\n data = csv.DictReader(csvFile)\n\n for value in data:\n # if keyword is not present already\n if self.keywords.get(value[CSV_KEYWORD_KEY].lstrip()) is None:\n # removing beginning white space from keyword name and weights\n self.keywords[value[CSV_KEYWORD_KEY].lstrip()] = Keyword(\n value[CSV_KEYWORD_KEY].lstrip(),\n value[CSV_GENRE_KEY].lstrip(),\n int(value[CSV_POINTS_KEY].lstrip())\n )\n # if keyword already present, just add genre and it's points\n else:\n # removing beginning white space from keyword name and weights\n self.keywords[value[CSV_KEYWORD_KEY].lstrip()].genres.append(\n value[CSV_GENRE_KEY].lstrip()\n )\n self.keywords[value[CSV_KEYWORD_KEY].lstrip()].points[\n value[CSV_GENRE_KEY].lstrip()\n ] = int(value[CSV_POINTS_KEY].lstrip())\n\n def get_books(self, path_to_json=None) -> None:\n \"\"\"\n Populate books from provided JSON file.\n\n :param path_to_json: path to the JSON file\n :return: None\n \"\"\"\n if path_to_json is not None:\n with open(path_to_json, 'r') as jsonFile:\n data = json.load(jsonFile)\n\n # take from endpoint if path not provided\n else:\n data = list()\n cur_page = 72\n while True:\n print(f\"getting response from page number {cur_page}\")\n response = request.urlopen(BOOKS_ENDPOINT_PAGED+str(cur_page))\n resp_data = json.loads(response.read())\n print(f\"response received from page number {cur_page}\")\n if len(resp_data) == 0:\n break\n data.extend(resp_data)\n cur_page += 1\n\n print(f\"Total number of books fetched are {len(data)}\")\n\n for value in data:\n self.books.append(Book(\n value.get(JSON_BOOK_NAME_KEY),\n value.get(JSON_BOOK_DESC_KEY))\n )\n\n self.books = sorted(self.books, key=lambda book: book.name)\n","repo_name":"thealphadollar/TopicParser","sub_path":"genre_parser/genreFinder.py","file_name":"genreFinder.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41117475322","text":"# Walk the matrix in clockwise direction\n\ndef WalkAMatrix(matrix):\n result = []\n row_count = len(matrix)\n col_count = len(matrix[0])\n\n start_row = 0\n end_row = row_count - 1\n start_col = 0\n end_col = col_count - 1\n\n while(end_row >= start_row and end_column >= start_column):\n # Walk across the top starting row for each column from beginning to end\n # This is left-right across the top\n for column in range(start_column, end_column + 1):\n # Add the item to result in order\n result.append(matrix[start_row][column])\n\n # Increment our start row since we visited each value\n start_row += 1\n\n # Walk top-bottom for the end column\n for row in range(start_row, end_row + 1):\n result.append(matrix[row][end_column])\n\n end_column -= 1\n\n # Since we increment start_row, we need to make sure we are still in bounds\n if end_row >= start_row:\n # Walk left-right on the bottom row\n for column in range(end_column, start_column - 1, -1):\n result.append(matrix[end_row][column])\n\n end_row -= 1\n\n # Since we increment start_row, we need to make sure we are still in bounds\n if end_column >= start_column:\n # walk bottom-top for the start column\n for row in range(end_row, start_row - 1, -1):\n 
result.append(matrix[row][start_column])\n\n\n start_column += 1\n\n return result","repo_name":"ajrame3/CodeForInterviews","sub_path":"arrays/WalkAMatrix.py","file_name":"WalkAMatrix.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41749122378","text":"# Functions goes here\n\n\ndef num_check(question):\n\n error = \"It should contain a number more than zero.\"\n\n valid = False\n while not valid:\n response = (input(question))\n\n if response.lower() == \"xxx\":\n return \"xxx\"\n\n else:\n try:\n if float(response) <=0:\n print(error)\n else:\n return float(response)\n\n except ValueError:\n print(error)\n\n\n# Main Routine\n\nprint(\"*** Trapezium Area / Perimeter ***\")\n\nbottom_base = num_check(\"What is the bottom base:\")\ntop_base = num_check(\"What is the top base:\")\nheight = num_check(\"What is the height:\")\nside_1 = num_check(\"What is side 1:\")\nside_2 = num_check(\"What is side 2:\")\n\n\nbottom_base = int(bottom_base)\ntop_base = int(top_base)\nheight = int(height)\nside_1 = int(side_1)\nside_2 = int(side_2)\n\n#area = bottom_base + top_base / 2 * height\narea = 0.5 * (bottom_base + top_base) * height\nperimeter = bottom_base + top_base + side_1 + side_2\n\nprint(\"The area is {}\".format(area))\nprint(\"The perimeter is {}\".format(perimeter))\n\nprint(\"*** Trapezium Area / Perimeter ***\")\n","repo_name":"massey-high-school/2020-91896-7-assessment-zionapril","sub_path":"2020-91896-7-assessment-zionapril-master/Area_Perimeter_Trapezium.py","file_name":"Area_Perimeter_Trapezium.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73793303304","text":"from math import*\n \nif __name__ == '__main__':\n t = int(input())\n cnt = 1\n while cnt <= t:\n cnt += 1\n n, x, m = map(float, input().split())\n dem = 0\n while n <= m:\n lai = n*(x/100)\n n += lai\n dem += 1\n print(dem)","repo_name":"ThangQT2606/PYTHON","sub_path":"PY01007.py","file_name":"PY01007.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17138941836","text":"\"\"\"\nThis module provides an interface to SMV files which often end in the IMG format.\nThese are commonly used in microED.\n\nSee https://wiki.uni-konstanz.de/ccp4/index.php/SMV_file_format for more details.\n\nNote\n----\nGeneral users:\n Use the simplified smv.smvReader() function to load the data and meta\n data as a python dictionary.\n\nAdvanced users and developers:\n Access the file internals through the smv.fileSMV() class.\n\n\"\"\"\n\nfrom pathlib import Path\nimport datetime\n\nimport numpy as np\n\nclass fileSMV:\n \"\"\"Class to represent SMV files.\n\n Attributes\n ----------\n file_name : str\n The name of the file\n file_path : pathlib.Path\n A pathlib.Path object for the open file\n fid : file\n The file handle to the opened MRC file.\n dataType : np.dtype\n The numpy dtype of the data.\n dataSize : np.ndarray\n The number of pixels along each dimension. Corresponds to the shape attribute of a np.ndarray\n num_header_bytes : \n The number of bytes in the header. 
Usually 512.\n header_info : dict\n A dictionary containing the header meta data.\n\n camera_length=110, lamda=0.0197, pixel_size=0.01, beam_center=None, binned_by=1\n \"\"\"\n \n def __init__(self, filename, verbose=False):\n \"\"\" Initialize opening the file\n \n Parameters\n ----------\n filename : str or pathlib.Path or file object\n String pointing to the filesystem location of the file.\n\n verbose : bool, optional, default False\n If True, debug information is printed.\n \n \"\"\"\n \n self._verbose = verbose\n self._expected_keys = ('HEADER_BYTES', 'DIM', 'BYTE_ORDER', 'TYPE', 'SIZE1', 'SIZE2', 'PIXEL_SIZE', \n 'WAVELENGTH', 'DISTANCE', 'PHI', 'BEAM_CENTER_X', 'BEAM_CENTER_Y', 'BIN', \n 'DATE', 'DETECTOR_SN', 'OSC_RANGE', 'OSC_START', 'IMAGE_PEDESTAL', 'TIME', \n 'TWOTHETA')\n self._data_types = {'unsigned_short': np.uint16} # convert SMV types with numpy dtypes\n self.header_info = {}\n self.num_header_bytes = None\n self.dataType = None\n self.dataSize = [0, 0]\n self._v = verbose\n \n if hasattr(filename, 'read'):\n self.fid = filename\n try:\n self.file_name = self.fid.name\n except AttributeError:\n self.file_name = None\n else:\n # check filename type. Prefer pathlib.Path\n if isinstance(filename, str):\n filename = Path(filename)\n elif isinstance(filename, Path):\n pass\n else:\n raise TypeError('Filename is supposed to be a string or pathlib.Path')\n \n self.file_path = filename\n self.file_name = self.file_path.name\n \n try:\n self.fid = open(self.file_path, 'rb')\n except IOError:\n print('Error reading file: \"{}\"'.format(self.file_path))\n raise\n except:\n raise\n \n if not self._validate():\n raise IOError('Not an SMV file: {}'.format(self.file_path))\n \n self.readHeader()\n self.parseHeader()\n \n def __del__(self):\n \"\"\"Destructor which also closes the file\n\n \"\"\"\n if not self.fid.closed:\n if self._v:\n print('Closing input file: {}'.format(self.file_path))\n self.fid.close()\n\n def __enter__(self):\n \"\"\"Implement python's with statement\n\n \"\"\"\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n \"\"\"Implement python's with statement\n and close the file via __del__()\n \"\"\"\n self.__del__()\n return None\n \n def _validate(self):\n first_line = self.fid.read(15).decode('UTF-8')\n if first_line == '{\\nHEADER_BYTES=':\n bytes_str = self.fid.readline().decode('UTF-8')\n self.num_header_bytes = int(bytes_str.strip().strip(';'))\n return True\n else:\n return False\n \n def readHeader(self):\n \"\"\"Read the header information and conver to numbers or strings.\"\"\"\n \n self.fid.seek(0, 0)\n head = self.fid.read(self.num_header_bytes).decode('UTF-8').split('\\n')\n for line in head:\n if '=' in line:\n key, val = line.split('=')\n val = val.strip(';')\n if key in self._expected_keys:\n try:\n self.header_info[key] = float(val)\n try:\n self.header_info[key] = int(val)\n except:\n pass # it is a float\n except:\n self.header_info[key] = val # not a number\n \n def parseHeader(self):\n \"\"\"Parse the header dictionary for relelvant information to read the data in the file.\"\"\"\n for key, val in self.header_info.items():\n if key == 'SIZE1':\n self.dataSize[1] = val # column\n elif key == 'SIZE2':\n self.dataSize[0] = val # row\n elif key == 'TYPE':\n try:\n self.dataType = self._data_types[val]\n except KeyError:\n raise(f'File data type not supported: {val}')\n \n def getDataset(self):\n self.readHeader()\n self.parseHeader()\n \n self.fid.seek(self.num_header_bytes, 0)\n data = np.fromfile(self.fid, 
count=self.dataSize[0] * self.dataSize[1], dtype=self.dataType)\n data = data.reshape(self.dataSize)\n data_out = {}\n data_out['data'] = data\n return data_out\n \ndef smvWriter(out_path, dp, camera_length=110, lamda=0.0197, pixel_size=0.01, beam_center=None, binned_by=1):\n \"\"\" Write out data as a SMV (.img) formatted file\n Header is 512 bytes of zeros and then filled with ASCII.\n \n Note:\n - only little endian is supported\n - only uint16 is supported\n - ony 2D data is supported\n - some other meta data (PHI, DATE, etc.) is populated with hard coded values\n \n Parameters\n ----------\n camera_length : float\n The calibrated camera length (not the label) in mm. Default is 110 mm.\n lamda : float\n The wavelength of the radiation in Ansgroms. Default is 0.0197 for 300 kV electrons\n pixel_size : float\n Physical detector pixel size in mm. Default is 0.01 mm (10 microns)\n beam_center : tuple\n The location of the center beam in column, row format in mm (not pixels!)\n binned_by : int\n The binning applied to the original data. This is necessary for proper\n calibrations of detector distances and beam center. Default is 1.\n \"\"\"\n if dp.dtype != np.uint16:\n raise TypeError(\"Only uint16 data type is supported.\")\n dtype = 'unsigned_short'\n \n if not beam_center:\n beam_center = [ii / 2 * pixel_size for ii in dp.shape]\n \n # make sure binned_by is an integer\n binned_by = int(binned_by)\n \n # Write 512 bytes of zeros\n with open(out_path, 'wb') as f0:\n f0.write(np.zeros(512, dtype=np.uint8))\n # Write the header over the zeros as needed\n with open(out_path, 'r+') as f0:\n f0.write(\"{\\nHEADER_BYTES=512;\\n\")\n f0.write(\"DIM=2;\\n\")\n f0.write(\"BYTE_ORDER=little_endian;\\n\")\n f0.write(f\"TYPE={dtype};\\n\")\n f0.write(f\"SIZE1={dp.shape[1]};\\n\") # size 1 is columns\n f0.write(f\"SIZE2={dp.shape[0]};\\n\") # size 2 is rows\n f0.write(f\"PIXEL_SIZE={pixel_size};\\n\") # physical pixel size in mm\n f0.write(f\"WAVELENGTH={lamda};\\n\") # wavelength in Angstroms\n f0.write(f\"DISTANCE={int(camera_length)};\\n\") # in mm\n f0.write(\"PHI=0.0;\\n\")\n f0.write(f\"BEAM_CENTER_X={beam_center[1]};\\n\") # in mm (not pixels!)\n f0.write(f\"BEAM_CENTER_Y={beam_center[0]};\\n\") \n f0.write(f\"BIN={binned_by}x{binned_by};\\n\")\n f0.write(\"DATE=Fri Dec 31 23:59:59 1999;\\n\")\n f0.write(\"DETECTOR_SN=unknown;\\n\")\n f0.write(\"OSC_RANGE=1.0;\\n\")\n f0.write(\"OSC_START=0;\\n\")\n f0.write(\"IMAGE_PEDESTAL=0;\\n\")\n f0.write(\"TIME=1.0;\\n\")\n f0.write(\"TWOTHETA=0;\\n\")\n f0.write(\"}\\n\")\n # Append the binary image data at the end of the header\n with open(out_path, 'rb+') as f0:\n f0.seek(512, 0)\n f0.write(dp)\n\n \ndef smvReader(file_name, verbose=False):\n \"\"\" A simple function to read open a SMV, parse the header, and read the\n data and meta data.\n\n Parameters\n ----------\n file_name : str or pathlib.Path\n The path to the file to load.\n\n Returns\n -------\n out : dict\n A dictionary containing the data and interesting metadata.\n\n Example\n -------\n Simply read in all data from disk into memory. 
This assumes the dataset is 3 dimensional:\n >> from ncempy.io.smv import smvReader\n >> import matplotlib.pyplot as plt\n >> mrc1 = smvReader.('filename.mrc')\n >> plt.imshow(mrc1['data'][0, :, :]) #show the first image in the data set\n \"\"\"\n if isinstance(file_name, str):\n file_name = Path(file_name)\n\n with fileSMV(file_name) as f1: # open the file and init the class\n im1 = f1.getDataset() # read in the dataset\n \n # Calculate the pixel size in inverse angstroms according to the geometry in the header\n alpha = self.header_info['PIXEL_SIZE'] / self.header_info['CAMERA_LENGTH'] # angle across 1 pixel\n dp_pixel_distance = alpha / self.header_info['WAVELENGTH'] * 1e-10 # divide by wavelength to get distance in Angstroms\n pixelSize = (dp_pixel_distance, dp_pixel_distance)\n self.dataOut = {'pixelSize': pixelSize, 'pixelUnit':'A', 'filename': self.file_name, \n 'BIN':self.header_info['BIN']}\n print('Warning: pixelSize does not take binning into account.')\n \n return im1 # return the data and metadata as a dictionary\n","repo_name":"ercius/openNCEM","sub_path":"ncempy/io/smv.py","file_name":"smv.py","file_ext":"py","file_size_in_byte":10096,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"81"} +{"seq_id":"7915940411","text":"import cv2\nimport pytesseract\n\ntry:\n from PIL import Image\nexcept ImportError:\n import Image\n\npytesseract.pytesseract.tesseract_cmd=r'/usr/local/bin/tesseract'\n\n\ndef ocr_core(img):\n text = pytesseract.image_to_string(img,lang='eng')\n return text\n\nimg = cv2.imread(\"invoice-image.jpeg\")\n\n# get grayscale image\ndef get_grayscale(image):\n return cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n\n# Noise removal\ndef remove_noise(image):\n return cv2.medianBlur(image,5)\n\n# thresholding\n\ndef threshold(image):\n return cv2.threshold(image,0,255,cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n\nimg = get_grayscale(img)\nimg = threshold(img)\nimg = remove_noise(img)\n\nprint(ocr_core(img))\n\nmon_texte_extrait = pytesseract.image_to_string(Image.open(\"invoice-image.jpeg\"),lang='eng',config=\"--oem 3 --psm 6\")\nprint(\"#\"*10+\" RÉSULTAT EXTRACTION \"+\"#\"*10)\nprint(mon_texte_extrait)\nprint(\"#\"*40)\nprint(\"\\n\\n\\n\")\nprint(\"#\"*15+\" FOR ORIENTATION AND SCRIPT DETECTION \"+\"#\"*15)\n\nprint(pytesseract.image_to_osd(Image.open(\"invoice-image.jpeg\")))\nprint(\"#\"*50)\nprint(\"\\n\\n\\n\")\n\nprint(\"#\"*15+\" BOX BOUNDARIES AROUND ELEMENT \"+\"#\"*15)\nprint(pytesseract.image_to_boxes(Image.open(\"invoice-image.jpeg\")))\nprint(\"#\"*50)","repo_name":"morellawson/ocr-anaclic-project","sub_path":"scripts/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71395685385","text":"# 1 Создать функцию, принимающую в качестве параметра список (list) \n# целых чисел и возвращающую сумму квадратов всех нечетных чисел в данном списке.\n\ndef square_it(numArr):\n return sum(i*i for i in numArr if i % 2 != 0)\n\nprint(square_it([1, 2, 3, 4, 5]))\n\n# 2 Создать итератор, который принимает в конструктор итерируемый объект и признак чет/нечет \n# и возвращает только элементы с четной или нечетной (в соответствии с признаком, переданным в конструктор) \n# позицией в итерируемом объекте. Например, для последовательности a, b, c, d чет вернет b, d (2-й и 4-й элементы), \n# а нечет a, c (1-й и 3-й элементы). 
Сделать в двух вариантах:\n# 1) Класс итератор.\n# 2) Генератор.\n\nclass OddEvenIterator:\n def __init__(self, numArr, factor):\n self.numArr = numArr\n self.factor = factor\n self.counter = -1\n self.limit = 0\n\n def __iter__(self):\n return self\n\n def __next__(self): \n if self.factor == 'even':\n self.counter += 2\n return self.numArr[self.counter]\n elif self.factor == 'odd':\n if self.limit == 0:\n self.counter = 0\n self.limit = 1\n return self.numArr[self.counter]\n else: \n self.counter += 2\n return self.numArr[self.counter]\n\na = OddEvenIterator([1, 2, 3, 4, 5], 'odd')\nprint(next(a))\nprint(next(a))\nprint(next(a))\n\n# 3 Создать функцию принимающую на вход массив целых чисел, все элементы которого имеют значение либо 1, либо 2.\n# Функция должна сортировать его так, чтобы сперва шли 2, а потом 1 (то есть, вход вида [1,2,2,1,1,2,1], \n# выход вида [2,2,2,1,1,1,1]). Не использовать стандартных\n# функций сортировки из библиотеки Питона и сделать сортировку за линейное время и один проход массива.\n\ndef sortArr(numArr):\n one_list = []\n two_list = []\n for i in range(len(numArr)):\n if numArr[i] == 1:\n one_list.append(numArr[i])\n if numArr[i] == 2:\n two_list.append(numArr[i])\n return one_list + two_list\n\n \nprint(sortArr([1, 2, 1, 2, 1, 1]))\n\n\n\n\n","repo_name":"fedotovdmitriy14/python_tasks","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34379407218","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom cityScrapperApp.SalesForce import SalesForceClass\nfrom cityScrapperApp.models import ScrapDetails\n\n__author__ = 'eMaM'\n\n#@receiver(post_save, sender=ScrapDetails)\ndef ScrapDetailsSingal(sender, instance, **kwargs):\n SalesForceInstance = SalesForceClass()\n if instance.phone:\n SalesForceInstance.check_and_create_lead(last_name=instance.name,\n phone=instance.phone,\n campaign_source=(str(instance.scrap.name).split(',')[0])[:25],\n lead_source='AhmedFlipKey' if instance.scrap.source == 'FlipKey' else 'AhmedCL',\n website=instance.url,\n company='FlipKey' if instance.scrap.source == 'FlipKey' else 'Criaglist',\n tags='HomeAway, scrape, house',\n email='',\n is_international=True)\n","repo_name":"eMaM1921990/cityScrap","sub_path":"cityScrapperApp/Signals.py","file_name":"Signals.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41700059374","text":"import pathlib\nfrom typing import Dict, Tuple, Union\nimport numpy as np\nimport tifffile\nfrom skimage.transform import resize as skimage_resize\nfrom ophys_etl.utils.array_utils import normalize_array\nfrom ophys_etl.modules.mesoscope_splitting.tiff_splitter import (\n AvgImageTiffSplitter)\nfrom ophys_etl.modules.mesoscope_splitting.tiff_metadata import (\n ScanImageMetadata)\n\n\ndef stitch_tiff_with_rois(\n full_field_path: Union[pathlib.Path, str],\n avg_surface_path: Union[pathlib.Path, str]) -> np.ndarray:\n \"\"\"\n Create the stitched full field image with ROIs from the\n average surface TIFF inserted\n\n Parameters\n ----------\n full_field_path: Union[pathlib.Path, str]\n Path to the full field TIFF file\n\n avg_surface_path: Union[pathlib.Path, str]\n Path to the averaged surface TIFF file\n\n Returns\n -------\n stitched_roi_img: np.ndarray\n \"\"\"\n\n if isinstance(full_field_path, str):\n 
full_field_path = pathlib.Path(full_field_path)\n\n if isinstance(avg_surface_path, str):\n avg_surface_path = pathlib.Path(avg_surface_path)\n\n full_field_img = stitch_full_field_tiff(full_field_path)\n full_field_metadata = ScanImageMetadata(full_field_path)\n avg_splitter = AvgImageTiffSplitter(avg_surface_path)\n\n stitched_roi_img = _insert_rois_into_surface_img(\n full_field_img=full_field_img,\n full_field_metadata=full_field_metadata,\n avg_image_splitter=avg_splitter)\n\n return stitched_roi_img\n\n\ndef stitch_full_field_tiff(\n tiff_path: pathlib.Path) -> np.ndarray:\n \"\"\"\n Create the stitched version of the full-field average\n image from a full-field TIFF file.\n\n Parameters\n ----------\n tiff_path: pathlib.Path\n Path to the full field TIFF file\n\n Returns\n -------\n stitched_img: np.ndarray\n The full field image as an array of np.uint16\n \"\"\"\n\n img = _average_full_field_tiff(tiff_path)\n tiff_metadata = ScanImageMetadata(tiff_path)\n img = _stitch_full_field_tiff(\n tiff_metadata=tiff_metadata,\n avg_img=img)\n return normalize_array(array=img, dtype=np.uint16)\n\n\ndef _insert_rois_into_surface_img(\n full_field_img: np.ndarray,\n full_field_metadata: ScanImageMetadata,\n avg_image_splitter: AvgImageTiffSplitter) -> np.ndarray:\n \"\"\"\n Insert thumbnails from avg_image_splitter into a stitched\n full field image.\n\n Parameters\n ----------\n full_field_img: np.ndarray\n The stitched full field image into which we are inserting\n the ROI thumbnails\n\n full_field_metadata: ScanImageMetadata\n Metadata read from the original full_field_img TIFF file\n\n avg_image_splitter: AvgImageTiffSplitter\n Splitter which will provide the ROI thumbnails (using\n get_avg_img(i_roi=i_roi)\n\n Returns\n -------\n output_img: np.ndarray\n A copy of full_filed_img with the ROI thumbnails\n from avg_image_splitter superimposed in the correct\n location.\n\n Notes\n -----\n One of the first steps is ot make a copy of full_field_img,\n so this method does not alter full_field_img in place.\n \"\"\"\n\n (ff_resolution,\n ff_size) = _validate_all_roi_same_size(full_field_metadata)\n\n (origin_col,\n origin_row) = _get_origin(full_field_metadata)\n\n output_img = np.copy(full_field_img)\n\n physical_to_pixels = (ff_resolution[0]/ff_size[0],\n ff_resolution[1]/ff_size[1])\n\n for i_roi in range(avg_image_splitter.n_rois):\n roi_size = avg_image_splitter.roi_size(i_roi=i_roi)\n roi_resolution = avg_image_splitter.roi_resolution(i_roi=i_roi)\n roi_center = avg_image_splitter.roi_center(i_roi=i_roi)\n avg_img = avg_image_splitter.get_avg_img(i_roi=i_roi, z_value=None)\n\n roi_conversion = (roi_resolution[0]/roi_size[0],\n roi_resolution[1]/roi_size[1])\n\n # physical_to_pixels and roi_conversion are in units of\n # pixels per degree; if the ROI has a larger pixels per\n # degree than the destination image, the ROI has to be\n # downsampled to a smaller pixel grid to fit into the\n # full field image.\n rescaling_factor = (physical_to_pixels[0]/roi_conversion[0],\n physical_to_pixels[1]/roi_conversion[1])\n\n avg_img_shape = avg_img.shape\n\n # remember the difference between XY coordinates and (row, col)\n # coordinates\n new_rows = np.round(avg_img_shape[0]*rescaling_factor[1])\n new_rows = new_rows.astype(int)\n\n new_cols = np.round(avg_img_shape[1]*rescaling_factor[0])\n new_cols = new_cols.astype(int)\n\n avg_img = skimage_resize(avg_img,\n output_shape=(new_rows, new_cols))\n\n avg_img = normalize_array(\n array=avg_img,\n dtype=np.uint16)\n\n row0 = 
roi_center[1]-roi_size[1]/2\n row0 -= origin_row\n row0 = np.round(row0*physical_to_pixels[1]).astype(int)\n\n col0 = roi_center[0]-roi_size[0]/2\n col0 -= origin_col\n col0 = np.round(col0*physical_to_pixels[0]).astype(int)\n\n row1 = row0+avg_img.shape[0]\n col1 = col0+avg_img.shape[1]\n\n output_img[row0:row1,\n col0:col1] = avg_img\n\n return output_img\n\n\ndef _average_full_field_tiff(\n tiff_path: pathlib.Path) -> np.ndarray:\n \"\"\"\n Read in the image data from a full field TIFF image and average\n over slices and volumes.\n\n Parameters\n ----------\n tiff_path: pathlib.Path\n Path to the TIFF file\n\n Returns\n -------\n avg_img: np.ndarray\n \"\"\"\n metadata = ScanImageMetadata(tiff_path)\n\n # accumulate the pages one-by-one to avoid loading a\n # large numpy array into memory needlessly\n page_ct = 0\n avg_img = None\n with tifffile.TiffFile(tiff_path, mode='rb') as in_file:\n for page in in_file.pages:\n page_ct += 1\n\n # must cast as float to avoid overflow errors\n arr = page.asarray().astype(float)\n\n if avg_img is None:\n avg_img = arr\n else:\n avg_img += arr\n\n avg_img = avg_img / page_ct\n\n # validate that the number of pages in the tiff file\n # was as expected\n expected_n_pages = metadata.numVolumes*metadata.numSlices\n if page_ct != expected_n_pages:\n msg = f\"{tiff_path}\\n\"\n msg += f\"numVolumes: {metadata.numVolumes}\\n\"\n msg += f\"numSlices: {metadata.numSlices}\\n\"\n msg += f\"implies n_pages: {expected_n_pages}\\n\"\n msg += f\"actual n_pages: {page_ct}\"\n raise ValueError(msg)\n\n return avg_img\n\n\ndef _get_stitched_tiff_shapes(\n tiff_metadata: ScanImageMetadata,\n avg_img: np.ndarray) -> Dict:\n \"\"\"\n Get the final shape for the stitched TIFF to be produced\n for a given full field TIFF\n\n Parameters\n ----------\n tiff_metadata: ScanImageMetadata\n The metadata object associated with this avg_img\n\n avg_img: np.ndarray\n Average image produced by _average_full_field_tiff\n\n Returns\n -------\n shape_dict: Dict\n 'shape': the final shape of the stitched tiff\n 'gap': the gap (in pixels) between columns in the final stitched image\n \"\"\"\n\n # Make sure that every ROI only has one scanfield\n for roi in tiff_metadata.defined_rois:\n if not isinstance(roi['scanfields'], dict):\n msg = f\"{tiff_metadata.file_path}\\n\"\n msg += \"contains an ROI with more than one scanfield;\\n\"\n msg += \"uncertain how to handle this case\"\n raise ValueError(msg)\n\n # Make sure that every ROI has the same size in pixels as determined\n # by pixelResolutionXY\n resolution = None\n for i_roi in range(len(tiff_metadata.defined_rois)):\n this_resolution = tiff_metadata.roi_resolution(i_roi)\n if resolution is None:\n resolution = this_resolution\n else:\n if resolution != this_resolution:\n msg = f\"{tiff_metadata.file_path}\\n\"\n msg += \"contains ROIs with different pixel resolutions;\\n\"\n msg += \"uncertain how to handle this case\"\n raise ValueError(msg)\n\n n_rois = len(tiff_metadata.defined_rois)\n\n # image coordinates...\n stitched_shape = (resolution[1],\n resolution[0]*n_rois)\n\n gap = (avg_img.shape[0] - resolution[1]*n_rois)//(n_rois-1)\n\n # check that avg_img has expected shape based on this finding\n expected_avg_shape = (n_rois*stitched_shape[0]+(n_rois-1)*gap,\n stitched_shape[1]//n_rois)\n\n if avg_img.shape != expected_avg_shape:\n msg = f\"{tiff_metadata.file_path}\\n\"\n msg += \"expected average over pages to have shape \"\n msg += f\"{expected_avg_shape}\\n\"\n msg += f\"got {avg_img.shape}\\n\"\n msg += \"unsure how to 
proceed with stitching\"\n raise ValueError(msg)\n\n return {'shape': stitched_shape, 'gap': gap}\n\n\ndef _validate_all_roi_same_size(\n metadata: ScanImageMetadata) -> Tuple[Tuple[int, int],\n Tuple[float, float]]:\n \"\"\"\n Scan through the ROIs in the ScanImageMetadata\n and verify that they all have the same\n sizeXY and pixelResolutionXY\n\n Returns\n -------\n pixelResolutionXY\n\n sizeXY\n \"\"\"\n\n resolution = None\n physical_size = None\n\n for i_roi in range(metadata.n_rois):\n this_resolution = metadata.roi_resolution(i_roi)\n this_size = metadata.roi_size(i_roi)\n if resolution is None:\n resolution = this_resolution\n physical_size = this_size\n else:\n if not np.allclose(this_size, physical_size):\n msg = f\"{metadata.file_path}\\n\"\n msg += \"has ROIs with different physical units (sizeXY)\"\n raise ValueError(msg)\n if not this_resolution == resolution:\n msg = f\"{metadata.file_path}\\n\"\n msg += \"has ROIs with different pixel resolutions\"\n raise ValueError(msg)\n\n return (resolution, physical_size)\n\n\ndef _get_origin(\n metadata: ScanImageMetadata) -> Tuple[float, float]:\n \"\"\"\n Get the XY origin implied by all of the ROIs in a\n TIFF file\n \"\"\"\n origin_row = None\n origin_col = None\n for i_roi in range(metadata.n_rois):\n roi_center = metadata.roi_center(i_roi=i_roi)\n physical_size = metadata.roi_size(i_roi=i_roi)\n this_row_min = roi_center[1]-physical_size[1]/2\n this_col_min = roi_center[0]-physical_size[0]/2\n if origin_row is None or this_row_min < origin_row:\n origin_row = this_row_min\n if origin_col is None or this_col_min < origin_col:\n origin_col = this_col_min\n\n return (origin_col, origin_row)\n\n\ndef _stitch_full_field_tiff(\n tiff_metadata: ScanImageMetadata,\n avg_img: np.ndarray) -> np.ndarray:\n \"\"\"\n Stitch the full field TIFF into a single image, i.e.\n take the image produced by _average_full_field_tiff\n and rearrange its pixels to remove the artificial\n gaps betwen ROIs and arrange the ROIs according\n to their actual positions in physical space.\n\n Parameters\n ----------\n tiff_metadata: ScanImageMetadata\n The metadata associated with thi avg_image\n\n avg_img: np.ndarray\n average image returned by _average_full_field_tiff\n\n Returns\n -------\n stitched_img: np.ndarray\n \"\"\"\n\n final_shapes = _get_stitched_tiff_shapes(\n tiff_metadata=tiff_metadata,\n avg_img=avg_img)\n\n stitched_shape = final_shapes['shape']\n pixel_gap = final_shapes['gap']\n\n # Make sure ROIs all have the same size in physical\n # units and pixels\n\n (resolution,\n physical_size) = _validate_all_roi_same_size(tiff_metadata)\n\n physical_to_pixels = (resolution[0]/physical_size[0],\n resolution[1]/physical_size[1])\n\n (origin_col,\n origin_row) = _get_origin(tiff_metadata)\n\n stitched_img = np.zeros(stitched_shape, dtype=avg_img.dtype)\n\n for i_roi in range(tiff_metadata.n_rois):\n roi_center = tiff_metadata.roi_center(i_roi=i_roi)\n roi_row0 = roi_center[1]-physical_size[1]/2\n roi_col0 = roi_center[0]-physical_size[0]/2\n pix_row0 = np.round((roi_row0-origin_row)*physical_to_pixels[1])\n pix_row0 = pix_row0.astype(int)\n pix_col0 = np.round((roi_col0-origin_col)*physical_to_pixels[0])\n pix_col0 = pix_col0.astype(int)\n\n sub_img = avg_img[i_roi*(resolution[1]+pixel_gap):\n i_roi*pixel_gap+(i_roi+1)*resolution[1],\n :]\n\n stitched_img[pix_row0:pix_row0+resolution[1],\n pix_col0:pix_col0+resolution[0]] = sub_img\n\n return 
stitched_img\n","repo_name":"AllenInstitute/ophys_etl_pipelines","sub_path":"src/ophys_etl/modules/mesoscope_splitting/full_field_utils.py","file_name":"full_field_utils.py","file_ext":"py","file_size_in_byte":12905,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"72721854984","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 29 21:40:30 2019\r\n\r\n@author: Андрей\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as graph\r\nfrom collections import defaultdict\r\nimport math\r\n\r\n#task 1 a\r\n\r\ndef func(x):\r\n x = np.longdouble(x)\r\n return x * (np.sin(2 * x))\r\n\r\ndef gen_Lk(x, xi):\r\n xi = np.longdouble(xi)\r\n x = np.longdouble(x)\r\n res = np.longdouble([])\r\n\r\n for i, a in enumerate(xi):\r\n top = np.longdouble(1)\r\n bottom = np.longdouble(1)\r\n for j, b in enumerate(xi):\r\n if i != j:\r\n top *= (x - b)\r\n bottom *= (a - b)\r\n res = np.append(res, top / bottom)\r\n\r\n return res\r\n\r\ndef calc_lagrange(x, xvals, yvals):\r\n Lk = gen_Lk(x, xvals)\r\n result = np.longdouble(0)\r\n for i, value in enumerate(yvals):\r\n result += np.longdouble(value) * Lk[i]\r\n return result\r\n\r\ndef calc_err(xvals, yvals, a, b, N):\r\n ans = 0\r\n left = a\r\n step = np.longdouble((b - a) / N)\r\n while left <= b:\r\n ans = max(ans, np.abs(calc_lagrange(left, xvals, yvals) - func(left)))\r\n left += step\r\n return ans\r\n\r\ndef gen_x_for_lagrange(x0, deg):\r\n return x0 - 5 + np.arange(0, deg + 1) * 10 / deg\r\n\r\ndef plot_error(x0, deg, N, fn):\r\n lagrange_x = gen_x_for_lagrange(x0, deg)\r\n lagrange_y = [func(pnt) for pnt in lagrange_x]\r\n xs = np.arange(x0 - 5, x0 + 5, 10.0 / N)\r\n ys = [np.abs(calc_lagrange(x, lagrange_x, lagrange_y) - func(x)) for x in xs]\r\n\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.xlabel('x')\r\n graph.ylabel('err')\r\n graph.savefig(fn)\r\n print('Max error ' + str(calc_err(lagrange_x, lagrange_y, x0 - 5, x0 + 5, N)))\r\n\r\n#task 1b\r\ndef task1b():\r\n xs = range(5, 51)\r\n x0 = 100\r\n ys = []\r\n for deg in range(5, 51):\r\n lagrange_x = gen_x_for_lagrange(x0, deg)\r\n lagrange_y = [func(pnt) for pnt in lagrange_x]\r\n ys.append(calc_err(lagrange_x, lagrange_y, x0 - 5, x0 + 5, 1000))\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.ylabel('max err')\r\n graph.xlabel('N')\r\n graph.savefig('1b.png') \r\n\r\ndef task1bscale():\r\n xs = range(20, 51)\r\n x0 = 100\r\n ys = []\r\n for deg in range(20, 51):\r\n lagrange_x = gen_x_for_lagrange(x0, deg)\r\n lagrange_y = [func(pnt) for pnt in lagrange_x]\r\n ys.append(calc_err(lagrange_x, lagrange_y, x0 - 5, x0 + 5, 1000))\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.ylabel('max err')\r\n graph.xlabel('N')\r\n graph.savefig('1bscale.png')\r\n \r\ndef task1blog():\r\n xs = range(5, 51)\r\n x0 = 100\r\n ys = []\r\n for deg in range(5, 51):\r\n lagrange_x = gen_x_for_lagrange(x0, deg)\r\n lagrange_y = [func(pnt) for pnt in lagrange_x]\r\n ys.append(np.log10(calc_err(lagrange_x, lagrange_y, x0 - 5, x0 + 5, 1000)))\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.ylabel('max err')\r\n graph.xlabel('N')\r\n graph.savefig('1blog.png') \r\n \r\n#task 1с\r\n\r\ndef cheb(k, deg):\r\n return np.longdouble(math.cos(np.longdouble(math.pi) / 2 * (2 * k - 1) / deg))\r\n\r\ndef move_segment(a, b, t):\r\n return np.longdouble(0.5) * (a + b) + np.longdouble(0.5) * (b - a) * t\r\n\r\ndef gen_arr(N):\r\n return np.longdouble(np.array([np.cos((np.pi * (2 * k - 1)) / (2 * N)) for k in range(1, N + 1)]))\r\n\r\ndef 
task1c():\r\n xs = range(5, 51)\r\n x0 = 100\r\n ys = []\r\n for deg in range(5, 51):\r\n cheb_arr = gen_arr(deg+1)\r\n lagrange_x = [move_segment(95, 105, t) for t in cheb_arr]\r\n lagrange_y = [func(pnt) for pnt in lagrange_x]\r\n ys.append(calc_err(lagrange_x, lagrange_y, x0 - 5, x0 + 5, 1000))\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.ylabel('max err')\r\n graph.xlabel('N')\r\n graph.savefig('1c.png')\r\n\r\ndef task1c_both():\r\n xs = range(5, 51)\r\n x0 = 100\r\n ys = []\r\n ys1 = []\r\n for deg in range(5, 51):\r\n cheb_arr = gen_arr(deg+1)\r\n lagrange_x = [move_segment(95, 105, t) for t in cheb_arr]\r\n lagrange_y = [func(pnt) for pnt in lagrange_x]\r\n ys.append(calc_err(lagrange_x, lagrange_y, x0 - 5, x0 + 5, 1000))\r\n lagrange_x1 = gen_x_for_lagrange(x0, deg)\r\n lagrange_y1 = [func(pnt) for pnt in lagrange_x1]\r\n ys1.append(calc_err(lagrange_x1, lagrange_y1, x0 - 5, x0 + 5, 1000))\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.plot(xs, ys1)\r\n graph.ylabel('max err')\r\n graph.xlabel('N')\r\n graph.savefig('1c_both.png')\r\n\r\ndef task1c_both_vals():\r\n xs = range(5, 51)\r\n x0 = 100\r\n ys = []\r\n ys1 = []\r\n for deg in range(5, 51):\r\n cheb_arr = gen_arr(deg+1)\r\n lagrange_x = [move_segment(95, 105, t) for t in cheb_arr]\r\n lagrange_y = [func(pnt) for pnt in lagrange_x]\r\n ys.append(calc_lagrange(x0, lagrange_x, lagrange_y))\r\n lagrange_x1 = gen_x_for_lagrange(x0, deg)\r\n lagrange_y1 = [func(pnt) for pnt in lagrange_x1]\r\n ys1.append(calc_lagrange(x0, lagrange_x1, lagrange_y1))\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.plot(xs, ys1)\r\n graph.ylabel('value in x0')\r\n graph.xlabel('N')\r\n graph.savefig('1c_vals.png')\r\n\r\n#task 1 d\r\ndef fm(x):\r\n return abs(x - 1)\r\n\r\ndef gen_x_for_lagrange_fm(x0, deg):\r\n return x0 - 1 + np.arange(0, deg + 1) * 2 / deg\r\n\r\ndef calc_err_fm(xvals, yvals, a, b, N):\r\n ans = 0\r\n left = a\r\n step = np.longdouble((b - a) / N)\r\n while left <= b:\r\n ans = max(ans, np.abs(calc_lagrange(left, xvals, yvals) - fm(left)))\r\n left += step\r\n return ans\r\n\r\ndef task1d_std():\r\n xs = range(5, 51)\r\n x0 = 1\r\n ys = []\r\n for deg in range(5, 51):\r\n lagrange_x = gen_x_for_lagrange_fm(x0, deg)\r\n lagrange_y = [fm(pnt) for pnt in lagrange_x]\r\n ys.append(np.log10(calc_err_fm(lagrange_x, lagrange_y, x0 - 1, x0 + 1, 1000)))\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.ylabel('max err')\r\n graph.xlabel('N')\r\n graph.savefig('1d_stdlog.png') \r\n\r\ndef task1d_cheb():\r\n xs = range(5, 51)\r\n ys = []\r\n for deg in range(5, 51):\r\n cheb_arr = gen_arr(deg+1)\r\n lagrange_x = [move_segment(0, 2, t) for t in cheb_arr]\r\n lagrange_y = [fm(pnt) for pnt in lagrange_x]\r\n ys.append(calc_err_fm(lagrange_x, lagrange_y, 0, 2, 1000))\r\n graph.clf()\r\n graph.plot(xs, ys)\r\n graph.ylabel('max err')\r\n graph.xlabel('N')\r\n graph.savefig('1d_cheb.png')\r\n \r\ndef task1d_all():\r\n xs = range(5, 100)\r\n ys1 = []\r\n ys2 = []\r\n ys3 = []\r\n ys4 = []\r\n for deg in range(5, 100):\r\n \"\"\"cheb_arr = gen_arr(deg+1)\r\n lagrange_x1 = [move_segment(0, 2, t) for t in cheb_arr]\r\n lagrange_y1 = [fm(pnt) for pnt in lagrange_x1]\r\n ys1.append(np.log10(calc_err_fm(lagrange_x1, lagrange_y1, 0, 2, 1000)))\r\n \r\n lagrange_x2 = gen_x_for_lagrange_fm(1, deg)\r\n lagrange_y2 = [fm(pnt) for pnt in lagrange_x2]\r\n ys2.append(np.log10(calc_err_fm(lagrange_x2, lagrange_y2, 0, 2, 1000)))\r\n \r\n lagrange_x3 = [move_segment(95, 105, t) for t in cheb_arr]\r\n lagrange_y3 = [func(pnt) for pnt in lagrange_x3]\r\n 
ys3.append(np.log10(calc_err(lagrange_x3, lagrange_y3, 95, 105, 1000)))\"\"\"\r\n \r\n lagrange_x4 = gen_x_for_lagrange(100, deg)\r\n lagrange_y4 = [func(pnt) for pnt in lagrange_x4]\r\n ys4.append((calc_err(lagrange_x4, lagrange_y4, 95, 105, 1000)))\r\n graph.clf()\r\n #graph.plot(xs, ys1)\r\n #graph.plot(xs, ys2)\r\n #graph.plot(xs, ys3)\r\n graph.plot(xs, ys4)\r\n #graph.legend(['fM cheb', 'fM uni', 'fS cheb', 'fS uni'], loc='upper left')\r\n graph.ylabel('max err')\r\n graph.xlabel('N')\r\n graph.savefig('1d_4.png')\r\n\r\nif __name__ == \"__main__\":\r\n task1d_all()\r\n #task1d_std()\r\n #task1d_cheb()\r\n #task1с()\r\n #task1с_both()\r\n #task1c_both_vals()\r\n #task1b()\r\n #task1blog()\r\n #task1bscale()\r\n #plot_error(100, 5, 1000, 'err5.png')\r\n #plot_error(100, 10, 1000, 'err10.png')\r\n #plot_error(100, 15, 1000, 'err15.png')\r\n","repo_name":"Hotckiss/NumMethods","sub_path":"hw2/hw2script.py","file_name":"hw2script.py","file_ext":"py","file_size_in_byte":7949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43208014283","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on 19/07/2021\r\n@author: Akshay Prakash\r\n\"\"\"\r\n\r\n\r\nimport pyaudio\r\nimport speech_recognition as sr\r\nimport pyttsx3\r\nimport time\r\nimport pywhatkit\r\nimport datetime\r\nimport wikipedia\r\nimport pyjokes\r\nimport webbrowser\r\nimport os\r\nimport subprocess\r\nfrom ecapture import ecapture as ec\r\nimport wolframalpha\r\nimport json\r\nimport requests\r\n\r\n#listen\r\nlistener = sr.Recognizer()\r\n\r\n#initalise engine\r\nengine= pyttsx3.init()\r\nengine.setProperty('rate', 170)\r\nvoices= engine.getProperty('voices')\r\n\r\nfor voice in voices:\r\n id= \"ID: %s\" %voice.id\r\n #print(\"ID: %s\" %voice.id)\r\n\r\n#Insert your HKEY for the narrator's voices\r\n#Eg: voice_id = \"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\MSTTS_V110_enGB_GeorgeM\"\r\nvoice_id = \" \"\r\nengine.setProperty('voice', voice_id)\r\n\r\n#start engine\r\ndef talk(text):\r\n engine.say(text)\r\n engine.runAndWait()\r\n\r\n#Greet\r\ndef wishMe():\r\n hour=datetime.datetime.now().hour\r\n if hour>=0 and hour<12:\r\n speak(\"Hello,Good Morning\")\r\n print(\"Hello,Good Morning\")\r\n elif hour>=12 and hour<18:\r\n speak(\"Hello,Good Afternoon\")\r\n print(\"Hello,Good Afternoon\")\r\n else:\r\n speak(\"Hello,Good Evening\")\r\n print(\"Hello,Good Evening\")\r\n \r\n#Take command\r\ndef take_command():\r\n try:\r\n #Pass if not recognized\r\n #use micophone as source and call the speech recognzier to listen to the source\r\n with sr.Microphone() as source:\r\n print('listening..')\r\n voice= listener.listen(source)\r\n command= listener.recognize_google(voice)\r\n command= command.lower() #necessary for print\r\n except:\r\n pass\r\n return command\r\n\r\n#Function for while \r\ndef run_bot():\r\n command= take_command()\r\n #print(command)\r\n\r\n #exit\r\n if 'bye' in command or 'stop' in command or 'see you' in command or 'thank you' in command:\r\n talk('I will see you again friend')\r\n print('Shutting down')\r\n exit()\r\n \r\n #Songs\r\n elif 'play' in command:\r\n #Songs on Youtube\r\n song= command.replace('play', '')\r\n talk('Playing'+ song)\r\n pywhatkit.playonyt(song)\r\n \r\n #Time\r\n elif 'time' in command:\r\n #Return hours and minutes, 1 to 12 hr %I\r\n current_time= datetime.datetime.now().strftime('%I %M %p')\r\n talk('It is'+ current_time + 'in India now')\r\n print(current_time)\r\n \r\n #Gain information\r\n elif 
'wikipedia search' in command:\r\n #wikipedia information (object, no of lines)\r\n try:\r\n object= command.replace('wikipedia search', '')\r\n info = wikipedia.summary(object, sentences=1, auto_suggest= True)\r\n print(info)\r\n talk(info)\r\n time.sleep(2)\r\n talk('Do you want me to give you a brief information?')\r\n command= take_command()\r\n\r\n if 'yes' in command:\r\n talk('Okay')\r\n #command= command.replace('yes', '')\r\n extra_results= wikipedia.summary(object, sentences= 5)\r\n talk(extra_results)\r\n else:\r\n talk('Okay, let me know if you need anything else')\r\n\r\n except wikipedia.exceptions.DisambiguationError as e:\r\n talk('I cannot figure what you are asking particularly. Here is a list for you.')\r\n time.sleep(1)\r\n talk(e.options)\r\n talk('These are the options available. What do you want me to search')\r\n #pluton\r\n\r\n except wikipedia.exceptions.PageError as page_error:\r\n talk('Your search does not match any pages. Please try again')\r\n\r\n #tell a joke\r\n elif 'joke' in command:\r\n talk(pyjokes.get_joke())\r\n\r\n #open gmail\r\n elif 'open gmail' in command:\r\n talk('Okay')\r\n webbrowser.open_new_tab('gmail.com')\r\n talk('Your Gmail is open now')\r\n\r\n #just open Google\r\n elif 'open google' in command:\r\n talk('Opening google in new tab')\r\n webbrowser.open_new_tab('www.google.com')\r\n talk('Let me know if you need anything else')\r\n\r\n #just open YouTube\r\n elif 'open youtube' in command:\r\n talk('Opening YouTube')\r\n webbrowser.open_new_tab('www.youtube.com')\r\n talk('Youtube is open now')\r\n\r\n #search on google\r\n elif 'search' in command:\r\n the_keyword= command.replace('search', '')\r\n pywhatkit.search(the_keyword)\r\n talk('Searching' + the_keyword)\r\n\r\n #latest news\r\n elif 'pluton news' in command:\r\n talk('Displaying latest news for you')\r\n news= webbrowser.open_new_tab('https://timesofindia.indiatimes.com/home/headlines')\r\n\r\n elif 'latest science news' in command:\r\n talk('Displaying latest scientific discoveries and inventions for you')\r\n news= webbrowser.open_new_tab('https://www.sciencedaily.com/news/top/science/')\r\n news_2= webbrowser.open_new_tab('https://www.livescience.com/news')\r\n\r\n #ecapture\r\n #accepts 3 parameters, first connected index is 0\r\n elif 'take a photo' in command or 'capture' in command:\r\n talk('Say cheeeeeeeeeeeeeeeeese')\r\n ec.capture(0, 'robo camera', 'img.jpg')\r\n\r\n #search on the internet\r\n elif 'open website' in command:\r\n command= command.replace('open website', '')\r\n webbrowser.open_new_tab(command)\r\n talk('Opened' + command)\r\n\r\n #Create wolfram alpha account and insert your UNIQUE-> app_id\r\n #listens two times in this statement\r\n elif 'question' in command:\r\n talk('I can answer to computational and geographical questions, what question do you want to ask now?')\r\n question=take_command() # give question\r\n app_id=\" \"\r\n client = wolframalpha.Client(app_id) #instance of class wolframalpha\r\n res = client.query(question) #res stores the response\r\n answer = next(res.results).text\r\n talk(answer)\r\n print(answer)\r\n\r\n #elif 'start' in command:\r\n # talk('tell me what to run')\r\n # app_name= take_command()\r\n # os.system(str(app_name))\r\n\r\n #subprocess can execute other programs\r\n #shut down /l means log off\r\n elif 'terminate' in command:\r\n talk('Okay')\r\n talk('Your PC will shut down in 60 seconds, please make sure you have saved and closed all applications')\r\n subprocess.call(['shutdown', '/s'])\r\n\r\n else:\r\n 
talk('My bad')\r\n talk('Could you repeat?')\r\n\r\nwhile True:\r\n wishMe()\r\n run_bot()\r\n","repo_name":"aprakash7/virtual_assistant","sub_path":"pluton.py","file_name":"pluton.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74578380743","text":"# app/repositories/prism_repository.py\nfrom models.hedron import Hedron\nfrom models.prism import PrismID, Prism, Horoscope, PrismGender\n\n\n# from models.player import Player\n# from extensions import db\n\n\n# class HedronRepository:\n# @classmethod\n# def create(cls, hedron_id):\n# player = Hedron(hedron_id=hedron_id)\n# # db.session.add(player)\n# # db.session.commit()\n# return player\n\n # @classmethod\n # def get_by_id(cls, player_id):\n # return Player.query.get(player_id)\n #\n # @classmethod\n # def get_by_player_name(cls, player_name):\n # return Player.query.filter_by(player_name=player_name).first()\n #\n # @classmethod\n # def update(cls, player):\n # db.session.commit()\n #\n # @classmethod\n # def delete(cls, player):\n # db.session.delete(player)\n # db.session.commit()\n\ndef create_default_hedron(hid=PrismID.Mu):\n return Hedron(hid=hid, prisms={\n PrismID.Omega: Prism(\n pid=PrismID.Omega,\n first_name=\"Walter\",\n family_name=\"Black\",\n birth_sign=Horoscope.Scorpio,\n gender=PrismGender.Male\n ),\n PrismID.Mu: Prism(\n pid=PrismID.Mu,\n first_name=\"Ashely\",\n family_name=\"Purple\",\n birth_sign=Horoscope.Libra,\n gender=PrismGender.Female\n ),\n PrismID.Alpha: Prism(\n pid=PrismID.Alpha,\n first_name=\"Jack\",\n family_name=\"Crimson\",\n birth_sign=Horoscope.Ares,\n gender=PrismGender.Male\n ),\n PrismID.Gamma: Prism(\n pid=PrismID.Gamma,\n first_name=\"Lindsey\",\n family_name=\"Green\",\n birth_sign=Horoscope.Taurus,\n gender=PrismGender.Female\n ),\n PrismID.Beta: Prism(\n pid=PrismID.Beta,\n first_name=\"Natasha\",\n family_name=\"Aqua\",\n birth_sign=Horoscope.Aquarius,\n gender=PrismGender.Female\n ),\n PrismID.Lambda: Prism(\n pid=PrismID.Lambda,\n first_name=\"Dennis\",\n family_name=\"Gold\",\n birth_sign=Horoscope.Leo,\n gender=PrismGender.Male),\n PrismID.Theta: Prism(\n pid=PrismID.Theta,\n first_name=\"John\",\n family_name=\"Cyan\",\n birth_sign=Horoscope.Pisces,\n gender=PrismGender.Male\n ),\n PrismID.Phi: Prism(\n pid=PrismID.Phi,\n first_name=\"Lisa\",\n family_name=\"Pink\",\n birth_sign=Horoscope.Cancer,\n gender=PrismGender.Male\n )\n })\n\n\nDEBUG_HEDRON = create_default_hedron()\nVALID_HEDRON_IDS = [DEBUG_HEDRON.hid]\nVALID_HEDRON_TYPES = [\"blackjack\", \"music\", \"chess\", \"conquest\", \"social\"]\n\n\ndef get_app_ids_from_hid(hid):\n if hid in VALID_HEDRON_IDS:\n return VALID_HEDRON_TYPES\n raise Exception(f\"{hid} was not found\")\n\n\ndef get_all_hids():\n return VALID_HEDRON_IDS\n\n\ndef get_valid_hedron_ids():\n return VALID_HEDRON_IDS\n\n\ndef update_hedron_id(hedron_default_id):\n DEBUG_HEDRON.hid = hedron_default_id\n return DEBUG_HEDRON\n\n\ndef hedron_by_id(valid_id):\n if isinstance(valid_id, str):\n for hid in [DEBUG_HEDRON.hid]:\n if hid.name.lower() == valid_id.lower():\n valid_id = hid\n\n if valid_id not in VALID_HEDRON_IDS:\n raise Exception(f\"{valid_id} is not a valid ID\")\n return DEBUG_HEDRON\n\n\ndef get_default_hid():\n return 
DEBUG_HEDRON.hid\n\n","repo_name":"WestiferRobin/mu-prism","sub_path":"repositories/hedron_repository.py","file_name":"hedron_repository.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3816792071","text":"# -*- coding: utf-8 -*-\n\n# system imports\nimport os.path as osp\nfrom queue import Queue\n\n# external imports\nfrom PyQt6 import QtGui, QtCore, QtWidgets\nfrom PyQt6.QtCore import QModelIndex, Qt\n\n# maestral modules\nfrom maestral.utils.appdirs import get_home_dir\nfrom maestral.utils.path import delete\n\n# local imports\nfrom .utils import MaestralBackgroundTask, icon_to_pixmap, is_empty\nfrom .widgets import UserDialog\nfrom .selective_sync_dialog import AsyncListFolder, FileSystemModel, DropboxPathItem\nfrom .resources import APP_ICON_PATH, native_folder_icon\nfrom .resources.ui_setup_dialog import Ui_SetupDialog\n\n\n# noinspection PyArgumentList\nclass SetupDialog(QtWidgets.QDialog, Ui_SetupDialog):\n \"\"\"A dialog to link and set up a new Dropbox account.\"\"\"\n\n accepted = False\n\n def __init__(self, mdbx, parent=None):\n super().__init__(parent=parent)\n self.setupUi(self)\n\n self.mdbx = mdbx\n self.config_name = self.mdbx.config_name\n self.dbx_model = None\n self.excluded_items = []\n\n self.app_icon = QtGui.QIcon(APP_ICON_PATH)\n\n self.labelIcon_0.setPixmap(icon_to_pixmap(self.app_icon, 150))\n self.labelIcon_1.setPixmap(icon_to_pixmap(self.app_icon, 70))\n self.labelIcon_2.setPixmap(icon_to_pixmap(self.app_icon, 70))\n self.labelIcon_3.setPixmap(icon_to_pixmap(self.app_icon, 120))\n\n # prepare auth session\n self.auth_url = self.mdbx.get_auth_url()\n prompt = self.labelAuthLink.text().format(self.auth_url)\n self.labelAuthLink.setText(prompt)\n\n # set up Dropbox location combobox\n\n self.dropbox_location = self.mdbx.get_conf(\"sync\", \"path\")\n\n if self.dropbox_location == \"\":\n folder_name = f\"Dropbox ({self.config_name.capitalize()})\"\n self.dropbox_location = osp.join(get_home_dir(), folder_name)\n\n self.comboBoxDropboxPath.addItem(native_folder_icon(), self.dropbox_location)\n self.comboBoxDropboxPath.insertSeparator(1)\n self.comboBoxDropboxPath.addItem(QtGui.QIcon(), \"Choose...\")\n self.comboBoxDropboxPath.currentIndexChanged.connect(self.on_combobox)\n\n # resize dialog buttons\n width = self.pushButtonAuthPageCancel.width() * 1.1\n width = round(width)\n for b in (\n self.pushButtonAuthPageLink,\n self.pushButtonDropboxPathUnlink,\n self.pushButtonDropboxPathSelect,\n self.pushButtonFolderSelectionBack,\n self.pushButtonFolderSelectionSelect,\n self.pushButtonAuthPageCancel,\n self.pushButtonDropboxPathCancel,\n self.pushButtonClose,\n ):\n b.setMinimumWidth(width)\n b.setMaximumWidth(width)\n\n self.dropbox_folder_dialog = QtWidgets.QFileDialog(self)\n self.dropbox_folder_dialog.setAcceptMode(\n QtWidgets.QFileDialog.AcceptMode.AcceptOpen\n )\n self.dropbox_folder_dialog.setFileMode(QtWidgets.QFileDialog.FileMode.Directory)\n self.dropbox_folder_dialog.setOption(\n QtWidgets.QFileDialog.Option.ShowDirsOnly, True\n )\n self.dropbox_folder_dialog.setLabelText(\n QtWidgets.QFileDialog.DialogLabel.Accept, \"Select\"\n )\n self.dropbox_folder_dialog.setDirectory(get_home_dir())\n self.dropbox_folder_dialog.fileSelected.connect(self.on_new_dbx_folder)\n self.dropbox_folder_dialog.rejected.connect(\n lambda: self.comboBoxDropboxPath.setCurrentIndex(0)\n )\n\n # connect buttons to callbacks\n 
self.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose)\n        self.pushButtonLink.clicked.connect(self.on_link_clicked)\n        self.pushButtonAuthPageCancel.clicked.connect(self.on_reject_requested)\n        self.pushButtonAuthPageLink.clicked.connect(self.on_auth_clicked)\n        self.pushButtonDropboxPathCancel.clicked.connect(self.on_reject_requested)\n        self.pushButtonDropboxPathSelect.clicked.connect(\n            self.on_dropbox_location_selected\n        )\n        self.pushButtonDropboxPathUnlink.clicked.connect(self.unlink_and_go_to_start)\n        self.pushButtonFolderSelectionBack.clicked.connect(\n            self.stackedWidget.slideInPrev\n        )\n        self.pushButtonFolderSelectionSelect.clicked.connect(self.on_folders_selected)\n        self.pushButtonClose.clicked.connect(self.on_accept_requested)\n        self.selectAllCheckBox.clicked.connect(self.on_select_all_clicked)\n\n        if not self.mdbx.pending_link:\n            self.stackedWidget.setCurrentIndex(2)\n\n    # =============================================================================\n    # Main callbacks\n    # =============================================================================\n\n    def closeEvent(self, event):\n        if self.stackedWidget.currentIndex() == 4:\n            self.on_accept_requested()\n        else:\n            self.on_reject_requested()\n\n    def on_accept_requested(self):\n        del self.mdbx\n\n        self.accepted = True\n        self.accept()\n\n    def on_reject_requested(self):\n        self.accepted = False\n        self.reject()\n\n    def unlink_and_go_to_start(self):\n        self.mdbx.unlink()\n        self.stackedWidget.slideInIdx(0)\n\n    def on_link_clicked(self):\n        self.stackedWidget.fadeInIdx(1)\n        self.pushButtonAuthPageLink.setFocus()\n\n    def on_auth_clicked(self):\n        if self.lineEditAuthCode.text() == \"\":\n            msg = \"Please enter an authentication token.\"\n            msg_box = UserDialog(\"Authentication failed.\", msg, parent=self)\n            msg_box.open()\n        else:\n            self.progressIndicator.startAnimation()\n            self.pushButtonAuthPageLink.setEnabled(False)\n            self.lineEditAuthCode.setEnabled(False)\n\n            self.link_async()\n\n    def link_async(self):\n        token = self.lineEditAuthCode.text()\n\n        self.auth_task = MaestralBackgroundTask(\n            parent=self, config_name=self.mdbx.config_name, target=\"link\", args=(token,)\n        )\n        self.auth_task.sig_result.connect(self.on_link_done)\n\n    def on_link_done(self, res):\n        if res == 0:\n            # switch to next page\n            self.stackedWidget.slideInIdx(2)\n            self.pushButtonDropboxPathSelect.setFocus()\n            self.lineEditAuthCode.clear() # clear since we might come back on unlink\n\n        elif res == 1:\n            msg = \"Please make sure that you entered the correct authentication token.\"\n            msg_box = UserDialog(\"Authentication failed.\", msg, parent=self)\n            msg_box.open()\n        elif res == 2:\n            msg = (\n                \"Please make sure that you are connected to the internet and try again.\"\n            )\n            msg_box = UserDialog(\"Connection failed.\", msg, parent=self)\n            msg_box.open()\n\n        self.progressIndicator.stopAnimation()\n        self.pushButtonAuthPageLink.setEnabled(True)\n        self.lineEditAuthCode.setEnabled(True)\n\n    def on_dropbox_location_selected(self):\n        # start with clean sync state\n        self.mdbx.reset_sync_state()\n\n        # apply dropbox path\n        try:\n            if osp.exists(self.dropbox_location):\n                if is_empty(self.dropbox_location):\n                    delete(self.dropbox_location, raise_error=True)\n                else:\n                    msg_box = UserDialog(\n                        title=\"Folder is not empty\",\n                        message=(\n                            f'The folder \"{osp.basename(self.dropbox_location)}\" is '\n                            \"not empty. 
Would you like to merge its content with your \"\n \"Dropbox?\"\n ),\n button_names=(\"Cancel\", \"Merge\"),\n parent=self,\n )\n res = msg_box.exec()\n\n if res == UserDialog.DialogCode.Accepted:\n return\n elif res == UserDialog.DialogCode.Rejected:\n pass\n\n self.mdbx.create_dropbox_directory(self.dropbox_location)\n except OSError:\n msg_box = UserDialog(\n title=\"Could not set directory\",\n message=(\n \"Please check if you have permissions to write to the \"\n \"selected location.\"\n ),\n parent=self,\n )\n msg_box.exec()\n return\n\n # switch to next page\n self.mdbx.set_conf(\"sync\", \"excluded_items\", [])\n self.stackedWidget.slideInIdx(3)\n self.treeViewFolders.setFocus()\n\n # populate folder list\n if not self.excluded_items: # don't repopulate\n self.populate_folders_list()\n\n def on_folders_selected(self):\n self.mdbx.excluded_items = self.get_excluded_items()\n\n # if any excluded items are currently on the drive, delete them\n for item in self.excluded_items:\n local_item = self.mdbx.to_local_path(item)\n delete(local_item)\n\n # switch to next page\n self.stackedWidget.slideInIdx(4)\n\n # =============================================================================\n # Helper functions\n # =============================================================================\n\n def on_combobox(self, idx):\n if idx == 2:\n self.dropbox_folder_dialog.open()\n\n def on_new_dbx_folder(self, new_location):\n self.comboBoxDropboxPath.setCurrentIndex(0)\n if not new_location == \"\":\n self.comboBoxDropboxPath.setItemText(0, new_location)\n\n self.dropbox_location = new_location\n\n def populate_folders_list(self):\n self.pushButtonFolderSelectionSelect.setEnabled(False)\n\n self.async_loader = AsyncListFolder(self.mdbx.config_name, self)\n self.dbx_root = DropboxPathItem(\n self.async_loader, set(self.mdbx.excluded_items)\n )\n self.dbx_model = FileSystemModel(self.dbx_root)\n self.dbx_model.dataChanged.connect(self.update_select_all_checkbox)\n self.treeViewFolders.setModel(self.dbx_model)\n\n self.dbx_model.loading_done.connect(\n lambda: self.pushButtonFolderSelectionSelect.setEnabled(True)\n )\n self.dbx_model.loading_failed.connect(\n lambda: self.pushButtonFolderSelectionSelect.setEnabled(False)\n )\n\n self.dbx_model.loading_done.connect(\n lambda: self.selectAllCheckBox.setEnabled(True)\n )\n self.dbx_model.loading_failed.connect(\n lambda: self.selectAllCheckBox.setEnabled(False)\n )\n\n self.dbx_model.loading_done.connect(\n lambda: self.treeViewFolders.resizeColumnToContents(0)\n )\n\n def update_select_all_checkbox(self):\n check_states = []\n for irow in range(self.dbx_model._root_item.child_count_loaded()):\n index = self.dbx_model.index(irow, 1, QModelIndex())\n check_states.append(\n self.dbx_model.data(index, Qt.ItemDataRole.CheckStateRole)\n )\n if all(cs == 2 for cs in check_states):\n self.selectAllCheckBox.setChecked(True)\n else:\n self.selectAllCheckBox.setChecked(False)\n\n def on_select_all_clicked(self, checked):\n checked_state = 2 if checked else 0\n for irow in range(self.dbx_model._root_item.child_count_loaded()):\n index = self.dbx_model.index(irow, 1, QModelIndex())\n self.dbx_model.setCheckState(index, checked_state)\n\n def get_excluded_items(self):\n # We start with an empty excluded list since this is the initial setup.\n # We add unchecked items to the excluded list.\n excluded_items = []\n\n queue = Queue()\n queue.put(self.dbx_model._root_item)\n\n while not queue.empty():\n node = queue.get()\n\n if node.checkState == 0:\n 
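# a checkState of 0 corresponds to Qt.Unchecked, i.e. the user deselected this item\n                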
excluded_items.append(node._path_lower)\n\n for child in node._children:\n if isinstance(child, DropboxPathItem):\n queue.put(child)\n\n return excluded_items\n\n def changeEvent(self, event):\n if event.type() == QtCore.QEvent.Type.PaletteChange:\n self.update_dark_mode()\n\n def update_dark_mode(self):\n if self.dbx_model:\n self.dbx_model.reloadData(\n [Qt.ItemDataRole.DecorationRole]\n ) # reload folder icons\n\n # static method to create the dialog and return Maestral instance on success\n @staticmethod\n def configureMaestral(mdbx, parent=None):\n fsd = SetupDialog(mdbx, parent)\n fsd.show()\n fsd.exec()\n\n return fsd.accepted\n","repo_name":"samschott/maestral-qt","sub_path":"src/maestral_qt/setup_dialog.py","file_name":"setup_dialog.py","file_ext":"py","file_size_in_byte":12696,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"42570290608","text":"import math\nfrom pathlib import Path\n\nimport pytest\n\nfrom pythonfmu.builder import FmuBuilder\n\npytestmark = pytest.mark.skipif(\n not FmuBuilder.has_binary(), reason=\"No binary available for the current platform.\"\n)\npyfmi = pytest.importorskip(\n \"pyfmi\", reason=\"pyfmi is required for testing the produced FMU\"\n)\n\nDEMO = \"pythonslave.py\"\n\n\n@pytest.mark.integration\ndef test_integration_demo(tmp_path):\n script_file = Path(__file__).parent / DEMO\n\n fmu = FmuBuilder.build_FMU(script_file, dest=tmp_path, needsExecutionTool=\"false\")\n assert fmu.exists()\n model = pyfmi.load_fmu(str(fmu))\n res = model.simulate(final_time=0.5)\n\n assert res[\"realOut\"][-1] == pytest.approx(res[\"time\"][-1], rel=1e-7)\n\n\n@pytest.mark.integration\ndef test_integration_reset(tmp_path):\n script_file = Path(__file__).parent / DEMO\n\n fmu = FmuBuilder.build_FMU(script_file, dest=tmp_path, needsExecutionTool=\"false\")\n assert fmu.exists()\n\n vr = 5 # realOut\n dt = 0.1\n model = pyfmi.load_fmu(str(fmu))\n initial_value = model.get_real([vr])[0]\n assert initial_value == pytest.approx(3.0, rel=1e-7)\n model.do_step(0.0, dt, True)\n read = model.get_real([vr])[0]\n assert read == pytest.approx(dt, rel=1e-7)\n model.reset()\n read = model.get_real([vr])[0]\n assert read == pytest.approx(initial_value, rel=1e-7)\n\n\n@pytest.mark.integration\ndef test_integration_get_state(tmp_path):\n script_file = Path(__file__).parent / DEMO\n\n fmu = FmuBuilder.build_FMU(\n script_file,\n dest=tmp_path,\n needsExecutionTool=\"false\",\n canGetAndSetFMUstate=\"true\")\n assert fmu.exists()\n\n model = pyfmi.load_fmu(str(fmu))\n\n vr = model.get_model_variables()[\"realOut\"].value_reference\n dt = 0.1\n t = 0.0\n\n def step_model():\n nonlocal t\n model.do_step(t, dt, True)\n t += dt\n\n model.initialize()\n step_model()\n state = model.get_fmu_state()\n assert model.get_real([vr])[0] == pytest.approx(dt, rel=1e-7)\n step_model()\n assert model.get_real([vr])[0] == pytest.approx(dt * 2, rel=1e-7)\n model.set_fmu_state(state)\n assert model.get_real([vr])[0] == pytest.approx(dt, rel=1e-7)\n step_model()\n assert model.get_real([vr])[0] == pytest.approx(dt * 3, rel=1e-7)\n model.free_fmu_state(state)\n\n\n@pytest.mark.integration\ndef test_integration_get_serialize_state(tmp_path):\n fmpy = pytest.importorskip(\n \"fmpy\", reason=\"fmpy is not available for testing the produced FMU\"\n )\n\n script_file = Path(__file__).parent / DEMO\n\n fmu = FmuBuilder.build_FMU(\n script_file,\n dest=tmp_path,\n canGetAndSetFMUstate=\"true\",\n canSerializeFMUstate=\"true\")\n assert fmu.exists()\n\n 
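# The calls below drive fmpy's low-level FMI 2.0 API by hand: read\n    # modelDescription.xml, extract the archive, then wrap it in FMU2Slave.\n    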
model_description = fmpy.read_model_description(fmu)\n unzip_dir = fmpy.extract(fmu)\n\n model = fmpy.fmi2.FMU2Slave(\n guid=model_description.guid,\n unzipDirectory=unzip_dir,\n modelIdentifier=model_description.coSimulation.modelIdentifier,\n instanceName='instance1')\n\n realOut = filter(\n lambda var: var.name == \"realOut\", model_description.modelVariables\n )\n vrs = list(map(lambda var: var.valueReference, realOut))\n t = 0.0\n dt = 0.1\n\n def step_model():\n nonlocal t\n model.doStep(t, dt)\n t += dt\n\n model.instantiate()\n model.setupExperiment()\n model.enterInitializationMode()\n model.exitInitializationMode()\n\n step_model()\n state = model.getFMUstate()\n assert model.getReal(vrs)[0] == pytest.approx(dt, rel=1e-7)\n step_model()\n assert model.getReal(vrs)[0] == pytest.approx(dt * 2, rel=1e-7)\n model.setFMUstate(state)\n assert model.getReal(vrs)[0] == pytest.approx(dt, rel=1e-7)\n step_model()\n assert model.getReal(vrs)[0] == pytest.approx(dt * 3, rel=1e-7)\n\n serialize_fmu_state = model.serializeFMUstate(state)\n model.freeFMUstate(state)\n de_serialize_fmu_state = model.deSerializeFMUstate(serialize_fmu_state)\n model.setFMUstate(de_serialize_fmu_state)\n assert model.getReal(vrs)[0] == pytest.approx(dt, rel=1e-7)\n\n model.freeFMUstate(de_serialize_fmu_state)\n model.terminate()\n\n\n@pytest.mark.integration\ndef test_integration_get(tmp_path):\n script_file = Path(__file__).parent / DEMO\n\n fmu = FmuBuilder.build_FMU(script_file, dest=tmp_path, needsExecutionTool=\"false\")\n assert fmu.exists()\n model = pyfmi.load_fmu(str(fmu))\n\n to_test = {\n \"intParam\": 42,\n \"intOut\": 23,\n \"realOut\": 3.0,\n \"booleanVariable\": True,\n \"stringVariable\": \"Hello World!\",\n \"realIn\": 2.0 / 3.0,\n \"booleanParameter\": False,\n \"stringParameter\": \"dog\",\n \"container.someReal\": 99.0,\n \"container.subContainer.someInteger\": -15\n }\n\n variables = model.get_model_variables()\n for key, value in to_test.items():\n var = variables[key]\n if var.type == pyfmi.fmi.FMI2_INTEGER:\n model_value = model.get_integer([var.value_reference])[0]\n elif var.type == pyfmi.fmi.FMI2_REAL:\n model_value = model.get_real([var.value_reference])[0]\n elif var.type == pyfmi.fmi.FMI2_BOOLEAN:\n model_value = model.get_boolean([var.value_reference])[0]\n elif var.type == pyfmi.fmi.FMI2_STRING:\n model_value = model.get_string([var.value_reference])[0]\n else:\n pytest.xfail(\"Unsupported type\")\n\n assert model_value == value\n\n\n@pytest.mark.integration\ndef test_integration_set(tmp_path):\n script_file = Path(__file__).parent / DEMO\n\n fmu = FmuBuilder.build_FMU(script_file, dest=tmp_path, needsExecutionTool=\"false\")\n assert fmu.exists()\n model = pyfmi.load_fmu(str(fmu))\n\n to_test = {\n \"intParam\": 20,\n \"realIn\": 1.0 / 3.0,\n \"booleanParameter\": True,\n \"stringParameter\": \"cat\",\n \"container.someReal\": 42.0,\n \"container.subContainer.someInteger\": 421\n }\n\n variables = model.get_model_variables()\n for key, value in to_test.items():\n var = variables[key]\n if var.type == pyfmi.fmi.FMI2_INTEGER:\n model.set_integer([var.value_reference], [value])\n model_value = model.get_integer([var.value_reference])[0]\n elif var.type == pyfmi.fmi.FMI2_REAL:\n model.set_real([var.value_reference], [value])\n model_value = model.get_real([var.value_reference])[0]\n elif var.type == pyfmi.fmi.FMI2_BOOLEAN:\n model.set_boolean([var.value_reference], [value])\n model_value = model.get_boolean([var.value_reference])[0]\n elif var.type == pyfmi.fmi.FMI2_STRING:\n 
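# write the string value, then read it back through the matching FMI getter\n            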
model.set_string([var.value_reference], [value])\n model_value = model.get_string([var.value_reference])[0]\n else:\n pytest.xfail(\"Unsupported type\")\n\n assert model_value == value\n\n\n@pytest.mark.integration\ndef test_simple_integration_fmpy(tmp_path):\n fmpy = pytest.importorskip(\n \"fmpy\", reason=\"fmpy is not available for testing the produced FMU\"\n )\n\n script_file = Path(__file__).parent / DEMO\n\n fmu = FmuBuilder.build_FMU(script_file, dest=tmp_path)\n assert fmu.exists()\n res = fmpy.simulate_fmu(str(fmu), stop_time=2.0)\n\n assert res[\"realOut\"][-1] == pytest.approx(res[\"time\"][-1], rel=1e-7)\n\n\n@pytest.mark.integration\ndef test_integration_has_local_dep(tmp_path):\n slave_code = \"\"\"import math\nfrom pythonfmu.fmi2slave import Fmi2Slave, Fmi2Causality, Integer, Real, Boolean, String\nfrom localmodule import get_amplitude, get_time_constant\n\n\nclass PythonSlaveWithDep(Fmi2Slave):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.realIn = 22.0\n self.realOut = 0.0\n self.register_variable(Real(\"realIn\", causality=Fmi2Causality.input))\n self.register_variable(Real(\"realOut\", causality=Fmi2Causality.output))\n\n def do_step(self, current_time, step_size):\n self.realOut = self.realIn * get_amplitude() * math.exp((current_time + step_size) / get_time_constant())\n return True\n\"\"\"\n\n local_module = \"\"\"def get_amplitude():\n return 5.\n\ndef get_time_constant():\n return 0.1\n\"\"\"\n\n script_file = tmp_path / \"orig\" / \"slavewithdep.py\"\n script_file.parent.mkdir(parents=True, exist_ok=True)\n script_file.write_text(slave_code)\n\n local_file = script_file.parent / \"localmodule.py\"\n local_file.write_text(local_module)\n\n fmu = FmuBuilder.build_FMU(\n script_file,\n dest=tmp_path,\n project_files=[local_file],\n needsExecutionTool=\"false\",\n )\n assert fmu.exists()\n model = pyfmi.load_fmu(str(fmu))\n res = model.simulate(final_time=0.5)\n\n assert res[\"realOut\"][-1] == pytest.approx(\n 22.0 * 5.0 * math.exp(res[\"time\"][-1] / 0.1), rel=1e-7\n )\n\n\n@pytest.mark.integration\ndef test_integration_throw_py_error(tmp_path):\n fmpy = pytest.importorskip(\n \"fmpy\", reason=\"fmpy is not available for testing the produced FMU\"\n )\n\n slave_code = \"\"\"from pythonfmu.fmi2slave import Fmi2Slave, Fmi2Causality, Integer, Real, Boolean, String\n\n\nclass PythonSlaveWithException(Fmi2Slave):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.realIn = 22.0\n self.realOut = 0.0\n self.register_variable(Real(\"realIn\", causality=Fmi2Causality.input))\n self.register_variable(Real(\"realOut\", causality=Fmi2Causality.output))\n\n def do_step(self, current_time, step_size):\n raise RuntimeError()\n return True\n\"\"\"\n\n script_file = tmp_path / \"orig\" / \"slavewithexception.py\"\n script_file.parent.mkdir(parents=True, exist_ok=True)\n script_file.write_text(slave_code)\n\n fmu = FmuBuilder.build_FMU(script_file, dest=tmp_path)\n assert fmu.exists()\n\n with pytest.raises(Exception):\n fmpy.simulate_fmu(str(fmu), stop_time=1.0)\n","repo_name":"StanleyYoo/PythonFMU","sub_path":"pythonfmu/tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":9825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"4782069064","text":"import cv2\nimport numpy as np\nfrom rect import Rect\nfrom vector import Vector\n\ndef load_video_file(path: str):\n \"\"\"\n Load a video file and get its metadata.\n\n Parameters:\n path (str): The 
file path of the video.\n\n    Returns:\n        Tuple[cv2.VideoCapture, dict]: A tuple containing the video object and a dictionary with video metadata.\n            - The video object (cv2.VideoCapture) to read frames from the video file.\n            - The metadata dictionary containing 'frame_rate', 'frame_count', 'duration_secs', 'frame_width', and 'frame_height'.\n    \"\"\"\n    video = cv2.VideoCapture(path)\n\n    # Check if the video opened successfully\n    if not video.isOpened():\n        raise IOError(\"Error opening video file\")\n\n    metadata = {\n        \"frame_rate\": video.get(cv2.CAP_PROP_FPS),\n        \"frame_count\": video.get(cv2.CAP_PROP_FRAME_COUNT),\n        \"duration_secs\": round(video.get(cv2.CAP_PROP_FRAME_COUNT) / video.get(cv2.CAP_PROP_FPS)),\n        \"frame_width\": int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),\n        \"frame_height\": int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)),\n    }\n\n    return video, metadata\n\ndef draw_rect_in_frame(frame, rect: Rect, colour: tuple, thickness: int = 2):\n    \"\"\"\n    Draw a rectangle on the given frame.\n\n    Parameters:\n        frame (numpy.ndarray): The input frame on which to draw the rectangle.\n        rect (Rect): The rectangle object defining the bounding box coordinates.\n        colour (tuple): The color of the rectangle in BGR format (e.g., (0, 255, 0) for green).\n\n    Returns:\n        None\n    \"\"\"\n    cv2.rectangle(frame, (rect.position.x, rect.position.y), (rect.position.x + rect.width, rect.position.y + rect.height), colour, thickness=thickness)\n\ndef draw_line_in_frame(frame, position_1: Vector, position_2: Vector, colour: tuple, thickness: int = 2):\n    \"\"\"\n    Draw a line on the given frame.\n\n    Parameters:\n        frame (numpy.ndarray): The input frame on which to draw the line.\n        position_1 (Vector): The start point of the line.\n        position_2 (Vector): The end point of the line.\n        colour (tuple): The color of the line in BGR format (e.g., (0, 255, 0) for green).\n\n    Returns:\n        None\n    \"\"\"\n    cv2.line(frame, (position_1.x, position_1.y), (position_2.x, position_2.y), colour, thickness=thickness)\n\ndef draw_text_in_frame(frame, text: str, position: Vector, colour: tuple, font_scale: float = 0.5, thickness: int = 2):\n    \"\"\"\n    Draw text on the given frame.\n\n    Parameters:\n        frame (numpy.ndarray): The input frame on which to draw the text.\n        text (str): The text to draw.\n        position (Vector): The position of the text (bottom-left corner of the text string).\n        colour (tuple): The color of the text in BGR format (e.g., (0, 255, 0) for green).\n        font_scale (float): The font scale factor (default: 0.5).\n\n    Returns:\n        None\n    \"\"\"\n    cv2.putText(frame, text, (position.x, position.y), cv2.FONT_HERSHEY_SIMPLEX, font_scale, colour, thickness)\n\ndef get_median_frame(file_path: str, num_samples=50):\n    \"\"\"\n    Calculate the median of randomly selected frames from the video.\n\n    Parameters:\n        file_path (str): The file path of the video.\n        num_samples (int): The number of frames to randomly select for calculating the median (default: 50).\n\n    Returns:\n        numpy.ndarray: The background frame as a numpy array.\n    \"\"\"\n    video = cv2.VideoCapture(file_path)\n    \n    # Randomly select frames for calculating the median\n    frame_indices = video.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=num_samples)\n    \n    # Store the frames in array\n    frames = []\n    for idx in frame_indices:\n        # set the frame id to read that particular frame\n        video.set(cv2.CAP_PROP_POS_FRAMES, idx)\n        ret, frame = video.read()\n        frames.append(frame)\n\n    # Calculate the median of all the frames\n    median_frame = np.median(frames, axis=0).astype(np.uint8)\n\n    return median_frame\n\ndef subtract_background(background_frame, current_frame):\n    \"\"\"\n    Subtract the background frame from the current frame.\n\n    
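The difference is computed with cv2.absdiff, i.e. the per-pixel absolute\n    difference, so moving foreground objects appear as bright regions.\n\n    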
Parameters:\n        background_frame (numpy.ndarray): The background frame as a numpy array.\n        current_frame (numpy.ndarray): The current frame as a numpy array.\n\n    Returns:\n        numpy.ndarray: The subtracted frame as a numpy array.\n    \"\"\"\n    # Ensure both frames have the same size (width and height)\n    if background_frame.shape[:2] != current_frame.shape[:2]:\n        raise ValueError(\"Background and current frames must have the same size.\")\n\n    # Compute the absolute difference between the two frames\n    diff_frame = cv2.absdiff(background_frame, current_frame)\n\n    return diff_frame\n","repo_name":"Harris-lans/traffic-tracking","sub_path":"cv_utils.py","file_name":"cv_utils.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"1491061111","text":"import os\nimport sys\nimport glob\nimport numpy as np\nimport yaml as yl\nimport subprocess\n\nfrom absl import app, flags, logging\n\ndef merge_features(source1, source2):\n    try:\n        data1 = np.load(source1)\n        data2 = np.load(source2)\n        return np.concatenate((data1['values'], data2['values']))\n    except Exception:\n        return None\n    \ndef merge(argv):\n    \"\"\"Merge the features of matching files from the two dataset directories.\"\"\"\n\n    FLAGS = flags.FLAGS\n\n    # Verify dataset directory.\n    if not os.path.isdir(FLAGS.dataset_directory1):\n        logging.error('Dataset directory 1 {} does not exist.'.format(\n            FLAGS.dataset_directory1)\n        )\n        sys.exit(1)\n\n    # Verify dataset directory.\n    if not os.path.isdir(FLAGS.dataset_directory2):\n        logging.error('Dataset directory 2 {} does not exist.'.format(\n            FLAGS.dataset_directory2)\n        )\n        sys.exit(1)\n    \n    folders = [\n        os.path.join(FLAGS.dataset_directory1, subdir)\n        for subdir in os.listdir(FLAGS.dataset_directory1)\n        if os.path.isdir(os.path.join(FLAGS.dataset_directory1, subdir))\n    ]\n    \n    # Load data from all folders\n    for folder in folders:\n\n        idx = folder.rfind('/')\n        last_folder = folder[idx+1:]\n        \n        # Create the output directory.\n        outdir = os.path.join(FLAGS.output_directory, last_folder)\n        os.makedirs(outdir, exist_ok=True)\n\n        # Extract \"loop features\" from the file\n        sources = glob.glob('{}/*.npz'.format(folder))\n\n        for source1 in sources:\n            source2 = source1.replace(FLAGS.dataset_directory1, FLAGS.dataset_directory2)\n\n            idx = source1.rfind('/')\n            source_name = source1[idx+1:]\n            \n            features = merge_features(source1, source2)\n\n            if features is None:\n                logging.error('Error {}.'.format(source_name))\n                continue\n            \n            filename = source1.replace(folder, outdir)\n            filename = filename[:-4]\n            np.savez_compressed(filename, values=features)\n\n\n# Execute\nif __name__ == '__main__':\n    # app\n    flags.DEFINE_string('dataset_directory1',\n                        None,\n                        'Dataset directory1')\n    flags.mark_flag_as_required('dataset_directory1')\n    flags.DEFINE_string('dataset_directory2',\n                        None,\n                        'Dataset directory2')\n    flags.DEFINE_string('output_directory',\n                        'new_feature',\n                        'Output directory')\n    \n    flags.mark_flag_as_required('dataset_directory1')\n    flags.mark_flag_as_required('dataset_directory2')\n\n    app.run(merge)\n","repo_name":"otavioon/COLA-2022-Tools","sub_path":"scripts/merge_features.py","file_name":"merge_features.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
{"seq_id":"33324878876","text":"import torch.nn as nn\r\nimport torch\r\nfrom torch.nn.functional import interpolate\r\nclass Basic_Unit(nn.Module):#ResNet-50 bottleneck residual block\r\n    def 
__init__(self,channel_in,channel_mid,channel_out,mid_stride=1,mid_padding=1,mid_dilation=1):\r\n        super(Basic_Unit,self).__init__()\r\n        self.conv1 = nn.Conv2d(channel_in,channel_mid,(1,1))\r\n        self.bnorm1 = nn.BatchNorm2d(channel_mid)\r\n        self.relu1 = nn.ReLU(inplace=True)\r\n        self.conv2 = nn.Conv2d(channel_mid,channel_mid,(3,3),mid_stride,mid_padding,mid_dilation)#only the 3*3 convolution in each residual block may use padding, dilation or a stride other than 1\r\n        self.bnorm2 = nn.BatchNorm2d(channel_mid)\r\n        self.relu2 = nn.ReLU(inplace=True)\r\n        self.conv3 = nn.Conv2d(channel_mid,channel_out,(1,1))\r\n        self.bnorm3 = nn.BatchNorm2d(channel_out)\r\n        if mid_stride!=1 or channel_in!=channel_out:\r\n            self.bypass = nn.Sequential(\r\n                nn.Conv2d(channel_in,channel_out,(1,1),mid_stride),\r\n                nn.BatchNorm2d(channel_out)\r\n            )\r\n        else:\r\n            self.bypass = None\r\n        self.relu3 = nn.ReLU(inplace=True)\r\n    def forward(self,x):\r\n        original_x = x\r\n        x = self.conv1(x)\r\n        x = self.bnorm1(x)\r\n        x = self.relu1(x)\r\n        x = self.conv2(x)\r\n        x = self.bnorm2(x)\r\n        x = self.relu2(x)\r\n        x = self.conv3(x)\r\n        x = self.bnorm3(x)\r\n        if self.bypass!=None:\r\n            original_x = self.bypass(original_x)\r\n        x = torch.add(x,original_x)\r\n        x = self.relu3(x)\r\n        return x\r\n\r\nclass stage0(nn.Module):\r\n    def __init__(self):\r\n        super(stage0,self).__init__()\r\n        self.conv1 = nn.Conv2d(3,64,(7,7),2,3)\r\n        self.bnorm1 = nn.BatchNorm2d(64)\r\n        self.relu1 = nn.ReLU(inplace=True)\r\n        self.maxpool1 = nn.MaxPool2d(3,2,1)\r\n    def forward(self,x):\r\n        x = self.conv1(x)\r\n        x = self.bnorm1(x)\r\n        x = self.relu1(x)\r\n        x = self.maxpool1(x)\r\n        return x\r\nclass stage1(nn.Module):\r\n    def __init__(self):\r\n        super(stage1,self).__init__()\r\n        self.resblock1 = Basic_Unit(64,64,256)\r\n        self.resblock2 = Basic_Unit(256,64,256)\r\n        self.resblock3 = Basic_Unit(256,64,256)\r\n    def forward(self,x):\r\n        x = self.resblock1(x)\r\n        x = self.resblock2(x)\r\n        x = self.resblock3(x)\r\n        return x\r\nclass stage2(nn.Module):\r\n    def __init__(self):\r\n        super(stage2,self).__init__()\r\n        self.resblock1 = Basic_Unit(256,128,512,2)\r\n        self.resblock2 = Basic_Unit(512,128,512,1)\r\n        self.resblock3 = Basic_Unit(512,128,512)\r\n        self.resblock4 = Basic_Unit(512,128,512)\r\n    def forward(self, x):\r\n        x = self.resblock1(x)\r\n        x = self.resblock2(x)\r\n        x = self.resblock3(x)\r\n        x = self.resblock4(x)\r\n        return x\r\nclass stage3(nn.Module):\r\n    def __init__(self):\r\n        super(stage3, self).__init__()\r\n        self.resblock1 = Basic_Unit(512,256,1024)\r\n        self.resblock2 = Basic_Unit(1024,256,1024,1,2,2)\r\n        self.resblock3 = Basic_Unit(1024, 256, 1024, 1, 2, 2)\r\n        self.resblock4 = Basic_Unit(1024, 256, 1024, 1, 2, 2)\r\n        self.resblock5 = Basic_Unit(1024, 256, 1024, 1, 2, 2)\r\n        self.resblock6 = Basic_Unit(1024, 256, 1024, 1, 2, 2)\r\n    def forward(self, x):\r\n        x = self.resblock1(x)\r\n        x = self.resblock2(x)\r\n        x = self.resblock3(x)\r\n        x = self.resblock4(x)\r\n        x = self.resblock5(x)\r\n        x = self.resblock6(x)\r\n        return x\r\nclass stage4(nn.Module):\r\n    def __init__(self):\r\n        super(stage4, self).__init__()\r\n        self.resblock1 = Basic_Unit(1024,512,2048,1,2,2)\r\n        self.resblock2 = Basic_Unit(2048,512,2048,1,4,4)\r\n        self.resblock3 = Basic_Unit(2048, 512, 2048, 1, 4, 4)\r\n    def forward(self,x):\r\n        x = self.resblock1(x)\r\n        x = self.resblock2(x)\r\n        x = self.resblock3(x)\r\n        return x\r\n\r\nclass ASPP(nn.Module):\r\n    def __init__(self):\r\n        super(ASPP,self).__init__()\r\n        self.branch1_conv = nn.Conv2d(2048,256,(1,1),1)\r\n        self.branch1_bnorm = nn.BatchNorm2d(256)\r\n        self.branch1_relu = nn.ReLU(inplace=True)\r\n        self.branch2_conv = 
nn.Conv2d(2048,256,(3,3),1,12,12)\r\n        self.branch2_bnorm = nn.BatchNorm2d(256)\r\n        self.branch2_relu = nn.ReLU(inplace=True)\r\n        self.branch3_conv = nn.Conv2d(2048,256,(3,3),1,24,24)\r\n        self.branch3_bnorm = nn.BatchNorm2d(256)\r\n        self.branch3_relu = nn.ReLU(inplace=True)\r\n        self.branch4_conv = nn.Conv2d(2048, 256, (3, 3), 1, 36, 36)\r\n        self.branch4_bnorm = nn.BatchNorm2d(256)\r\n        self.branch4_relu = nn.ReLU(inplace=True)\r\n        self.branch5_avgpool = nn.AvgPool2d(63)\r\n        self.branch5_conv = nn.Conv2d(2048,256,(1,1))\r\n        self.branch5_bnorm =nn.BatchNorm2d(256)\r\n        self.branch5_relu = nn.ReLU(inplace=True)\r\n        self.last_conv = nn.Conv2d(1280,256,(1,1))\r\n        self.last_bnorm = nn.BatchNorm2d(256)\r\n        self.last_relu = nn.ReLU(inplace=True)\r\n        #Adaptive average pooling is not used here because it has no effect on a square feature map; it would be equivalent to doing nothing\r\n    def forward(self,x):\r\n        upper_size = x.size()[2]\r\n        x1 = self.branch1_conv(x)\r\n        x1 = self.branch1_bnorm(x1)\r\n        x1 = self.branch1_relu(x1)\r\n        x2 = self.branch2_conv(x)\r\n        x2 = self.branch2_bnorm(x2)\r\n        x2 = self.branch2_relu(x2)\r\n        x3 = self.branch3_conv(x)\r\n        x3 = self.branch3_bnorm(x3)\r\n        x3 = self.branch3_relu(x3)\r\n        x4 = self.branch4_conv(x)\r\n        x4 = self.branch4_bnorm(x4)\r\n        x4 = self.branch4_relu(x4)\r\n        x5 = self.branch5_avgpool(x)\r\n        x5 = self.branch5_conv(x5)\r\n        x5 = self.branch5_bnorm(x5)\r\n        x5 = self.branch5_relu(x5)\r\n        x5 = interpolate(x5,size=[upper_size,upper_size],mode='nearest')\r\n        x_concat = torch.cat((x1,x2,x3,x4,x5),dim=1)\r\n        x_last = self.last_conv(x_concat)\r\n        x_last = self.last_bnorm(x_last)\r\n        x_last = self.last_relu(x_last)\r\n        return x_last\r\n\r\nclass Deeplab_v3(nn.Module):\r\n    def __init__(self):\r\n        super(Deeplab_v3,self).__init__()\r\n        self.stage0 = stage0()\r\n        self.stage1 = stage1()\r\n        self.stage2 = stage2()\r\n        self.stage3 = stage3()\r\n        self.stage4 = stage4()\r\n        self.ASPP = ASPP()\r\n        self.conv1 = nn.Conv2d(256,256,(3,3),padding=1)\r\n        self.bnorm1 = nn.BatchNorm2d(256)\r\n        self.relu1 = nn.ReLU(inplace=True)\r\n        self.conv2 = nn.Conv2d(256,21,(1,1))\r\n        self.bnorm2 = nn.BatchNorm2d(21)\r\n        self.relu2 = nn.ReLU(inplace=True)\r\n    def forward(self,x):\r\n        upper_size = x.size()[2]\r\n        x = self.stage0(x)\r\n        x = self.stage1(x)\r\n        x = self.stage2(x)\r\n        x = self.stage3(x)\r\n        x = self.stage4(x)\r\n        x = self.ASPP(x)\r\n        x = self.conv1(x)\r\n        x = self.bnorm1(x)\r\n        x = self.relu1(x)\r\n        x = self.conv2(x)\r\n        x = self.bnorm2(x)\r\n        x = self.relu2(x)\r\n        x = interpolate(x,size=[upper_size,upper_size],mode='bilinear',align_corners=True)\r\n        return x\r\n\r\n","repo_name":"BaeHann/pytorch_Deeplab_v3_UNet","sub_path":"Deeplab_v3/DeepLab_V3.py","file_name":"DeepLab_V3.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"2643292935","text":"# coding:utf-8\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nimport sys, os\nimport qtawesome\nimport asyncio\nfrom gui.widget.contact.AboutSystem import AboutSystemWidget\nfrom gui.widget.contact.FeedbackWidget import FeedbackWidget\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nimport config\nfrom camera import *\nimport time\n\nimage = 0\n\n\nclass MainUi(QtWidgets.QMainWindow):\n    mouse_event_signal = QtCore.pyqtSignal(int, object)\n\n    def __init__(self):\n        super().__init__()\n\n        self.__init_UI()\n\n        self.original_window_geometry = self.geometry()\n\n        self.current_active_widget = None\n        self.mouse_event_signal.connect(self.__mouse_event)\n        self.show_content_widget(command_id=0)\n\n    def 
__init_UI(self):\n\n\n\n        self.resize(1024, 768)\n        self.setWindowIcon(QtGui.QIcon(config.APP_ICON))\n        self.setWindowIconText(config.APP_NAME)\n        self.setWhatsThis(config.APP_NAME)\n        self.setWindowTitle(config.APP_NAME)\n\n        self.main_widget = QtWidgets.QWidget() # create the main window widget\n        self.main_layout = QtWidgets.QGridLayout() # create a grid layout for the main widget\n\n        self.main_layout.setContentsMargins(0, 0, 0, 0)\n        self.main_widget.setLayout(self.main_layout) # set the grid layout on the main widget\n\n        self.title_widget = TitleWidget(self.mouse_event_signal) # top title widget\n        self.title_widget.setObjectName('title_widget')\n        self.title_widget.setStyleSheet('''background:#C92141;''')\n\n        self.title_layout = QtWidgets.QGridLayout()\n        self.title_layout.setContentsMargins(0, 0, 0, 0)\n        self.title_layout.setSpacing(0)\n        self.title_widget.setLayout(self.title_layout)\n\n        self.bottom_widget = QtWidgets.QWidget() # bottom section\n        self.bottom_widget.setObjectName('bottom_widget')\n        self.bottom_widget.setStyleSheet('''background:#C92141''')\n        self.bottom_layout = QtWidgets.QHBoxLayout()\n        self.bottom_widget.setLayout(self.bottom_layout)\n\n        self.start_button = QtWidgets.QPushButton('启 动')\n        self.start_button.setObjectName('start_button')\n        self.start_button.setFixedWidth(100)\n        self.start_button.clicked.connect(self.start_button_clicked)\n        # self.bottom_layout.addWidget(self.start_button)\n\n        self.content_widget = QtWidgets.QWidget()\n        self.content_widget.setObjectName('content_widget')\n        self.content_layout = QtWidgets.QHBoxLayout()\n        self.content_layout.setContentsMargins(0, 0, 0, 0)\n        self.content_widget.setLayout(self.content_layout)\n\n        self.main_layout.addWidget(self.title_widget, 0, 0, 1, 12)\n        self.main_layout.addWidget(self.content_widget, 1, 0, 12, 12)\n        self.main_layout.addWidget(self.bottom_widget, 13, 0, 1, 12)\n        self.setCentralWidget(self.main_widget) # set the main widget as the central widget\n\n        self.icon_widget = QtWidgets.QWidget() # top icon/display widget\n        self.icon_widget.setObjectName('icon_widget')\n        self.icon_widget.setStyleSheet('''border-top-left-radius:10px;''')\n        self.icon_layout = QtWidgets.QHBoxLayout()\n        self.icon_layout.setAlignment(QtCore.Qt.AlignLeft)\n        self.icon_widget.setLayout(self.icon_layout)\n\n        app_icon = QtWidgets.QLabel()\n        app_icon.setFixedSize(40, 40)\n        pixMap = QtGui.QPixmap(config.APP_ICON).scaled(app_icon.width(), app_icon.height())\n        app_icon.setPixmap(pixMap)\n        app_name = QtWidgets.QLabel(config.APP_NAME)\n        app_name.setStyleSheet('font-family: \"Microsoft YaHei\"; font-size:35px;')\n        self.icon_layout.addWidget(app_icon)\n        self.icon_layout.addWidget(app_name)\n\n        self.control_widget = QtWidgets.QWidget() # top window-control widget\n        self.control_widget.setObjectName('top_control_widget')\n        self.control_widget.setStyleSheet('''border-top-right-radius:10px;''')\n        self.control_layout = QtWidgets.QGridLayout()\n        self.control_widget.setLayout(self.control_layout)\n        self.control_widget.resize(100, 40)\n\n        self.left_close = QtWidgets.QPushButton(\"\") # close button\n        self.left_visit = QtWidgets.QPushButton(\"\") # blank (restore) button\n        self.left_mini = QtWidgets.QPushButton(\"\") # minimize button\n        self.control_layout.addWidget(self.left_mini, 0, 0, 1, 1)\n        self.control_layout.addWidget(self.left_close, 0, 2, 1, 1)\n        self.control_layout.addWidget(self.left_visit, 0, 1, 1, 1)\n\n        self.title_layout.addWidget(self.icon_widget, 0, 0, 1, 9)\n        self.title_layout.addWidget(self.control_widget, 0, 11, 1, 1)\n\n        self.left_close.setFixedSize(20, 20) # set the size of the close button\n        self.left_visit.setFixedSize(20, 20) # set the button size\n        self.left_mini.setFixedSize(20, 20) # set the size of the minimize button\n        self.left_close.setToolTip('最小化')\n        self.left_visit.setToolTip('适中大小')\n        
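# the tooltip strings are Chinese UI text: '最小化' means minimize, '适中大小' means medium size, '最大化' means maximize\n        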
self.left_mini.setToolTip('最大化')\n        self.left_close.clicked.connect(self.__close_window)\n        self.left_visit.clicked.connect(self.__resize_window)\n        self.left_mini.clicked.connect(self.__maximal_window)\n\n        self.setWindowOpacity(1.0) # set the window opacity\n        self.setAttribute(QtCore.Qt.WA_TranslucentBackground) # make the window background transparent\n        self.setWindowFlag(QtCore.Qt.FramelessWindowHint) # hide the window frame\n\n        self.main_layout.setSpacing(0)\n\n        self.__load_qss_stylesheet()\n        self.__set_tray_function()\n\n    def __load_qss_stylesheet(self):\n        self.left_close.setStyleSheet(\n            '''QPushButton{background:#F76677;border-radius:5px;}QPushButton:hover{background:red;}''')\n        self.left_visit.setStyleSheet(\n            '''QPushButton{background:#F7D674;border-radius:5px;}QPushButton:hover{background:yellow;}''')\n        self.left_mini.setStyleSheet(\n            '''QPushButton{background:#6DDF6D;border-radius:5px;}QPushButton:hover{background:green;}''')\n\n        self.bottom_widget.setStyleSheet('''\n            QWidget#bottom_widget{\n                color:#232C51;\n                background:#C92141;\n                border-top:1px solid darkGray;\n                border-bottom:1px solid darkGray;\n                border-right:1px solid darkGray;\n                border-bottom-right-radius:10px;\n                border-bottom-left-radius:10px;\n            }\n        ''')\n\n        self.setStyleSheet('''\n            QWidget#content_widget{\n                background:white;\n                border-top:1px solid white;\n            }\n            QWidget#top_widget{\n                border-top:1px solid white;\n                background:#87CEFA;\n            }\n            QPushButton#start_button{\n                border: none;\n                color: #FFFFFF;\n                border-radius: 10px;\n                min-width: 150px;\n                min-height: 50px;\n                background-color: #3672A4;\n                font-size: 20px;\n                font-family: Microsoft YaHei;\n            }\n            QPushButton#start_button:hover{\n                background-color:#266294;\n            }\n        ''')\n\n    def start_button_clicked(self):\n        start_time = time.time()\n\n        image = open_camera()\n        # cv2.imshow(' ', img)\n        # cv2.waitKey(1)\n        test = predict(image)\n        print(time.time()-start_time)\n        self.current_active_widget.send_results(test)\n\n    def __set_tray_function(self):\n        self.tray = QtWidgets.QSystemTrayIcon() # create the system tray object\n        self.tray.setIcon(QtGui.QIcon(config.APP_ICON)) # set the system tray icon\n        self.tray.setToolTip(config.APP_NAME)\n        self.tray_menu = QtWidgets.QMenu(QtWidgets.QApplication.desktop()) # create the menu\n        self.RestoreAction = QtWidgets.QAction(u'还 原 ', self, triggered=self.show) # add a top-level menu action (restore the main window)\n        self.QuitAction = QtWidgets.QAction(u'退 出 ', self, triggered=self.close) # add a top-level menu action (quit the program)\n        self.tray_menu.addAction(self.RestoreAction) # add the action to the menu\n        self.tray_menu.addSeparator()\n        self.tray_menu.addAction(self.QuitAction)\n        self.tray.setContextMenu(self.tray_menu) # set the system tray menu\n\n    def __mouse_event(self, command_id, mouseEvent):\n        if command_id == 1:\n            self.m_Position = mouseEvent.globalPos() - self.pos() # get the mouse position relative to the window\n            mouseEvent.accept()\n            self.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor)) # change the cursor icon\n        elif command_id == 2:\n            self.move(mouseEvent.globalPos() - self.m_Position) # move the window\n            mouseEvent.accept()\n        elif command_id == 3:\n            self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n\n    def __close_window(self):\n        self.showMinimized()\n\n    def __resize_window(self):\n        self.setGeometry(self.original_window_geometry)\n        desktop = QtWidgets.QApplication.desktop()\n        x = (desktop.width() - self.width()) // 2\n        y = (desktop.height() - self.height()) // 2\n        self.move(x, y)\n\n    def __maximal_window(self):\n        # get the desktop widget\n        desktop = QtWidgets.QApplication.desktop()\n        # get the displayable screen size\n        rect = desktop.screenGeometry()\n        # set the window geometry\n        self.setGeometry(rect)\n\n    def execute_clicked_command(self, command_id):\n        self.switch_button_style(command_id)\n        self.show_content_widget(command_id)\n\n    def 
switch_button_style(self, command_id):\n        left_button_group = [self.left_button_video, self.left_button_1, self.left_button_2, self.left_button_3,\n                             self.left_button_4, self.left_button_5, self.left_button_6, self.left_button_7,\n                             self.left_button_9]\n        if command_id == 0:\n            for left_button in left_button_group:\n                left_button.setStyleSheet('background:none')\n        else:\n            for left_button_index, left_button in enumerate(left_button_group):\n                if left_button_index == command_id - 1:\n                    left_button.setStyleSheet('background:#4A708B')\n                else:\n                    left_button.setStyleSheet('background:none')\n\n    def show_content_widget(self, command_id):\n        if command_id == 0:\n            selected_widget_class = ShowGoodsWidget\n        else:\n            selected_widget_class = None\n\n        if selected_widget_class is not None:\n            if self.current_active_widget is not None:\n                self.current_active_widget.hide()\n\n            content_widget = selected_widget_class.get_instance()\n            self.content_layout.addWidget(content_widget)\n            self.current_active_widget = content_widget\n            self.current_active_widget.show()\n\n\nclass TitleWidget(QtWidgets.QWidget):\n    def __init__(self, mouse_event_signal):\n        super().__init__()\n        self.mouse_event_signal = mouse_event_signal\n\n    def mousePressEvent(self, QMouseEvent):\n        if QMouseEvent.button() == QtCore.Qt.LeftButton:\n            self.m_flag = True\n            self.mouse_event_signal.emit(1, QMouseEvent)\n\n    def mouseMoveEvent(self, QMouseEvent):\n        if QtCore.Qt.LeftButton and self.m_flag:\n            self.m_flag = True\n            self.mouse_event_signal.emit(2, QMouseEvent)\n\n    def mouseReleaseEvent(self, QMouseEvent):\n        self.m_flag = False\n        self.mouse_event_signal.emit(3, QMouseEvent)","repo_name":"MichaelW17/GUI-Rotate","sub_path":"gui/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":11479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"25211882004","text":"import os\nfrom superdesk import json\nfrom datetime import datetime\n\n\ndef load_codes(filename):\n    with open(filename, \"r\") as f:\n        codes = json.load(f)\n    return codes\n\n\ndirname = os.path.dirname(os.path.realpath(__file__))\ndata_subject_codes = os.path.join(dirname, \"data\", \"subject_codes.json\")\nsubject_codes = load_codes(data_subject_codes)\n\n\ndef init_app(app) -> None:\n    last_modified = datetime(2012, 7, 10)\n    app.subjects.register(subject_codes, last_modified)\n","repo_name":"superdesk/superdesk-core","sub_path":"superdesk/io/iptc.py","file_name":"iptc.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"}
{"seq_id":"27944429737","text":"#!/usr/bin/env python3\n\"\"\"\nfiltered_logger\n\"\"\"\n\nimport logging, os, re\nimport mysql.connector\nfrom mysql.connector import Error\n\ndef filtered_logger(fields: list, redaction: str, message: str, separator: str) -> str:\n    \"\"\"returns the log message obfuscated\"\"\"\n    for field in fields:\n        message = re.sub(field + '=[^' + re.escape(separator) + ']*',\n                         field + '=' + redaction, message)\n    return message\n\ndef get_logger() -> logging.Logger:\n    \"\"\"returns a logging.Logger object\"\"\"\n    logger = logging.getLogger('user_data')\n    logger.setLevel(logging.INFO)\n    logger.propagate = False\n\n    formatter = logging.Formatter(\n        'name=%(name)s;action=%(action)s;%(message)s')\n\n    stream_handler = logging.StreamHandler()\n    stream_handler.setFormatter(formatter)\n    logger.addHandler(stream_handler)\n\n    return logger\n\ndef get_db():\n    try:\n        # Fetching the environment 
variables\n user = os.getenv('PERSONAL_DATA_DB_USERNAME', 'root')\n password = os.getenv('PERSONAL_DATA_DB_PASSWORD', '')\n host = os.getenv('PERSONAL_DATA_DB_HOST', 'localhost')\n db_name = os.getenv('PERSONAL_DATA_DB_NAME')\n\n if not db_name:\n raise ValueError(\"PERSONAL_DATA_DB_NAME must be set in the environment.\")\n\n # Creating the connection to the database\n connection = mysql.connector.connect(\n host=host,\n user=user,\n password=password,\n database=db_name\n )\n \n if connection.is_connected():\n return connection\n else:\n raise ConnectionError(\"Failed to connect to the database.\")\n\n except Error as e:\n print(\"Error while connecting to database:\", e)\n return None\n\nconnection = get_db()\n\nif connection:\n print(\"Successfully connected to the database.\")\n connection.close()","repo_name":"soOwasTaken/holbertonschool-web_back_end","sub_path":"personal_data/filtered_logger.py","file_name":"filtered_logger.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36039438670","text":"import Ivanova_es.dataset_structure as d\ndef add_data(dataset: dict, latin_name: str, cen_year: int, whire_htap: bool, horz_plant: bool):\n dataset[latin_name] = {\n 'cen_year': cen_year,\n 'whire_htap': whire_htap,\n 'horz_plant': horz_plant\n }\n\nadd_data(d.dataset, 'FRAXINUS PENNSYLVANICA', 2006, False, False)\nprint(d.dataset)","repo_name":"igortereshchenko/datascience","sub_path":"Ivanova_es/dataset_insert_manual.py","file_name":"dataset_insert_manual.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"563312481","text":"import os\nfrom app import create_app, db\nfrom app.models import User\nfrom app.models import User, Location, Project\nfrom flask.ext.script import Manager, Shell\nfrom flask.ext.migrate import Migrate, MigrateCommand\n\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\n\ndef make_shell_context():\n return dict(app=app, db=db, User=User)\n\nmanager.add_command('shell', Shell(make_context=make_shell_context))\nmanager.add_command('db', MigrateCommand)\n\n@manager.command\ndef test():\n \"\"\"Run the unit tests.\n >> python manage.py test\n \"\"\"\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n\n@manager.command\ndef bootstrap():\n \"\"\"Bootstrap database\n >> python manage.py bootstrap\n \"\"\"\n# db.drop_all()\n# db.create_all()\n# user = User(email='jeff@walkerjeff.com',\n# username='jeff',\n# password='password',\n# confirmed=True,\n# name='Jeff Walker',\n# location='Brunswick, ME')\n# db.session.add(user)\n location = Location(name='Androscoggin River',\n latitude=43.9211128,\n longitude=-69.9603785)\n db.session.add(location)\n project = Project(name=\"Jeff's Backyard\")\n db.session.add(project)\n db.session.commit()\n\n@manager.command\ndef deploy():\n from flask.ext.migrate import upgrade\n\n # migrate database to latest revision\n upgrade()\n\nif __name__ == '__main__':\n manager.run()\n","repo_name":"walkerjeffd/open-water-demo","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9681777466","text":"# 
****************************************************************************************************\n#\n# Name: Joi Wilson\n# Course: COSC 2110 Computer Languages: Python\n# Assignment: FtoC.py\n# Due Date: 11/9/2020\n# Description:\n# Write a program that converts celsius to fahrenheit\n#\n# ****************************************************************************************************\n\nimport tkinter.messagebox\nimport tkinter\n\n\n# ****************************************************************************************************\n\nclass CelsiusGUI:\n    def __init__(self):\n        self.main_window = tkinter.Tk()\n        self.main_window.title(\"converter\")\n        self.top_frame = tkinter.Frame(self.main_window)\n        self.mid_frame = tkinter.Frame(self.main_window)\n        self.bottom_frame = tkinter.Frame(self.main_window)\n\n        self.label1 = tkinter.Label(self.top_frame, text='Celsius:')\n        self.label2 = tkinter.Label(self.mid_frame, text='Fahrenheit:')\n        self.entry1 = tkinter.Entry(self.top_frame, width=10)\n\n        self.convert_button = tkinter.Button(self.top_frame,\n                                             text='Convert to Fahrenheit',\n                                             command=self.conFahr)\n        self.convert_button2 = tkinter.Button(self.mid_frame,\n                                              text='Convert to Celsius',\n                                              command=self.conCel)\n        self.quit_button = tkinter.Button(self.bottom_frame,\n                                          text='Quit',\n                                          command=self.main_window.destroy)\n        self.value = tkinter.StringVar()\n\n        self.result = tkinter.Entry(self.mid_frame, textvariable=self.value)\n        self.result2 = tkinter.Entry(self.top_frame, textvariable=self.value)\n\n        self.quit_button.pack(side='left')\n        self.convert_button.pack(side='right')\n        self.convert_button2.pack(side='right')\n        self.label1.pack(side='left')\n        self.entry1.pack(side='left')\n        self.label2.pack(side='left')\n\n        self.result.pack(side='left')\n        self.top_frame.pack()\n        self.mid_frame.pack()\n        self.bottom_frame.pack()\n        tkinter.mainloop()\n\n    # ****************************************************************************************************\n\n    def conFahr(self):\n        celsius = float(self.entry1.get())\n        fahrenheit = float((1.8 * celsius) + 32)\n        self.value.set(fahrenheit)\n\n    # ****************************************************************************************************\n\n    def conCel(self):\n        fahrenheit = float(self.entry1.get())\n        celsius = float((fahrenheit - 32) * 5 / 9)\n        self.value.set(celsius)\n\n\nif __name__ == '__main__':\n    my_gui = CelsiusGUI()\n","repo_name":"joimakanani/Code-Portfolio","sub_path":"FtoC.py","file_name":"FtoC.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"38440034843","text":"import numpy as np\nimport random\n\nimport PMSPRestrictions\n\n\nclass PMSPSolution:\n    def __init__(self,\n                 m : int,\n                 n : int,\n                 restrictions : PMSPRestrictions,\n                 order_of_tasks : list = None):\n        self.m = m\n        self.n = n\n        self.restrictions = restrictions\n        self.c = [0] * m\n        self.fitness = -1 \n        self.order_of_tasks = order_of_tasks\n\n    @staticmethod\n    def random_instance(restrictions : PMSPRestrictions):\n        # Randomizing the machines, so the first ones aren't favored\n        machines = list(range(restrictions.m))\n        random.shuffle(machines)\n        \n        # Initializes with zero task for each machine\n        tasks_per_machine = [0] * restrictions.m\n\n        # Randomizes the number of tasks for each machine\n        tasks_left = restrictions.n\n        for machine in machines:\n            ntasks = random.randint(0, tasks_left)\n            tasks_per_machine[machine] = ntasks\n            tasks_left -= ntasks\n        \n        # Adding the remaining tasks to the last 
machine\n        tasks_per_machine[restrictions.m-1] += tasks_left\n\n        # Randomizes the tasks for each machine\n        order_of_tasks = []\n        tasks = list(range(restrictions.n))\n        random.shuffle(tasks)\n\n        for no_tasks in tasks_per_machine:\n            mtasks = []\n            for i in range(no_tasks):\n                mtasks.append(tasks.pop())\n            order_of_tasks.append(mtasks)\n\n        # Generating the instance\n        instance = PMSPSolution(restrictions.m, restrictions.n, restrictions)\n        instance.order_of_tasks = order_of_tasks\n        instance.fitness = restrictions.evaluate(instance)\n        return instance\n    \n    #restrictions holds the constraints built from the parsed instance; order_of_tasks is the matrix that represents the solution\n    @staticmethod\n    def create_instance(restrictions : PMSPRestrictions,\n                        order_of_tasks: list):\n        instance = PMSPSolution(restrictions.m, restrictions.n, restrictions)\n        instance.order_of_tasks = order_of_tasks\n        if(restrictions.check_validity(instance)):\n            instance.fitness = restrictions.evaluate(instance)\n            return instance\n        else:\n            raise Exception(\"Invalid instance\")\n        \n        \n","repo_name":"diegodimer/pmspgeneticalg","sub_path":"metaheuristica/PMSPSolution.py","file_name":"PMSPSolution.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"18819099604","text":"#import urllib.request, json\n#import pandas as pd\nimport requests\n#\n#stops = ['060814','340060','340061','340101','40115' ,'170022']\nstops = ['340060','340061','40115','060814']\n\nfor t in range(len(stops)):\n    stopnum = stops[t]\n    #look up the stop name\n    stopnamelink=\"http://telematics.oasa.gr/api/?act=getStopNameAndXY&p1=\"+stopnum\n    r = requests.get(stopnamelink)\n    table3=r.json()\n    #print(table3)\n    print(\"--------------\")\n    print (\"*\"+table3[0]['stop_descr']+\"*\")\n\n######\n###### look up the line id, e.g. 218\n    linenamelink=\"http://telematics.oasa.gr/api/?act=webRoutesForStop&p1=\"+stopnum\n    r = requests.get(linenamelink)\n    table4=r.json()\n    #print(table4) ##debug\n######\n    link=\"http://telematics.oasa.gr/api/?act=getStopArrivals&p1=\"+stopnum\n    r = requests.get(link)\n    table=r.json()\n    #print(table)\n    #print(r) to inspect the response\n    #length = len(table)\n    if table is not None:\n        for i in range(len(table)):\n            #print (table)\n            #print(table[i]['route_code'])\n            routecode=table[i]['route_code']\n            time=table[i]['btime2']\n            #print (length)\n            #print (routecode)\n            link2=\"http://telematics.oasa.gr/api/?act=getRouteName&p1=\"+routecode\n            r = requests.get(link2)\n            #print(r)\n            table2=r.json()\n            #print (table)\n            linename=table2[0]['route_descr']\n            #####attempt to resolve the line id\n            linenumber = \"\"\n            for e in range(len(table4)):\n                if routecode==table4[e]['RouteCode']:\n                    linenumber=table4[e]['LineID']\n            print (\"[\"+linenumber+\"] \"+linename+\" σε \"+time+\" λεπτα!\")\n\n","repo_name":"haunter81/oasa_telematics","sub_path":"oasa.py","file_name":"oasa.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"41966635600","text":"# Auxiliary routine for solving a linear system A x = b. Calls our own LUP decomposition routine, as well as the forward and backward substitution routines, in LUP.py.\n# By L. 
van Veen, OnTechU, 2021.\nimport numpy as np\nfrom LUP import *\n\ndef linsolve(A,b):\n    # Solve A x = b with LUP decomposition.\n    L,U,P,par,ok = LUP(A)\n    if ok == 0:\n        print(\"Warning: nearly degenerate Jacobian!\")\n    y = ForwardSub(L,P,b)\n    x = BackwardSub(U,y)\n    return x\n","repo_name":"2072U/2072U_course_codes","sub_path":"Linear_systems/linsolve.py","file_name":"linsolve.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"8350464269","text":"#\n# @lc app=leetcode id=245 lang=python3\n#\n# [245] Shortest Word Distance III\n#\n\nfrom typing import List\n\n\n# @lc code=start\nclass Solution:\n    def shortestWordDistance(self, wordsDict: List[str], word1: str, word2: str) -> int:\n        if word1 == word2:\n            return self.shortestSameWordDistance(wordsDict, word1)\n        else:\n            return self.shortestDifferentWordDistance(wordsDict, word1, word2)\n\n    def shortestSameWordDistance(self, wordsDict: List[str], word: str) -> int:\n        wordIndices = []\n        for i, w in enumerate(wordsDict):\n            if w == word:\n                wordIndices.append(i)\n\n        shortestPath = 10000000\n        for i in range(len(wordIndices)-1):\n            shortestPath = min(shortestPath, wordIndices[i+1]-wordIndices[i])\n\n        return shortestPath\n\n\n    def shortestDifferentWordDistance(self, wordsDict: List[str], word1: str, word2: str) -> int:\n        pos1 = pos2 = -1\n        shortestPath = 10000000\n        for i, word in enumerate(wordsDict):\n            if word == word1:\n                pos1 = i\n            if word == word2:\n                pos2 = i\n            if pos1 >= 0 and pos2 >= 0:\n                shortestPath = min(shortestPath, abs(pos1-pos2))\n\n        return shortestPath\n\n# Basically the same as the other shortest-word-distance problems; just add a special case for when the two words are identical\n# @lc code=end\n\n","repo_name":"esakat/leetcode-python","sub_path":"245.shortest-word-distance-iii.py","file_name":"245.shortest-word-distance-iii.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"32377982650","text":"\"\"\"\nSimple, given a string of words, return the length of the shortest word(s).\n\nString will never be empty and you do not need to account for different data types.\n\"\"\"\n\ndef find_short(s):\n    # Split the input string into words\n    words = s.split()\n    \n    # Initialize a variable to keep track of the shortest word length\n    shortest_length = float('inf')  # Initialize to positive infinity\n    \n    # Iterate through the words and update the shortest_length if needed\n    for word in words:\n        word_length = len(word)\n        if word_length < shortest_length:\n            shortest_length = word_length\n    \n    return shortest_length\n\n# Test cases\nprint(find_short(\"This is a test\"))  # Output: 1 (Shortest word is \"a\")\nprint(find_short(\"These are some words\"))  # Output: 3 (Shortest word is \"are\")\nprint(find_short(\"bitcoin take over the world maybe who knows perhaps\"))  # Output: 3 (Shortest word is \"the\")\n\n\n# OR\n\ndef find_short_two(s):\n    return min(len(x) for x in s.split())","repo_name":"Jared951/codewars","sub_path":"shortest_word.py","file_name":"shortest_word.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"40076840747","text":"# SPDX-License-Identifier: MIT\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\nfrom xml.etree import ElementTree\n\nfrom .admindata import AdminData\nfrom .createsdgs import create_sdgs_from_et\nfrom .dataobjectproperty import 
DataObjectProperty\nfrom .element import IdentifiableElement\nfrom .exceptions import odxassert\nfrom .nameditemlist import NamedItemList\nfrom .odxlink import OdxDocFragment, OdxLinkDatabase, OdxLinkId, OdxLinkRef\nfrom .specialdatagroup import SpecialDataGroup\nfrom .tablerow import TableRow\nfrom .utils import dataclass_fields_asdict\n\nif TYPE_CHECKING:\n from .diaglayer import DiagLayer\n\n\n@dataclass\nclass Table(IdentifiableElement):\n \"\"\"This class represents a TABLE.\"\"\"\n semantic: Optional[str]\n key_label: Optional[str]\n struct_label: Optional[str]\n admin_data: Optional[AdminData]\n key_dop_ref: Optional[OdxLinkRef]\n table_rows_raw: List[Union[TableRow, OdxLinkRef]]\n # TODO: table_diag_comm_connectors\n sdgs: List[SpecialDataGroup]\n\n @staticmethod\n def from_et(et_element: ElementTree.Element, doc_frags: List[OdxDocFragment]) -> \"Table\":\n \"\"\"Reads a TABLE.\"\"\"\n kwargs = dataclass_fields_asdict(IdentifiableElement.from_et(et_element, doc_frags))\n odx_id = kwargs[\"odx_id\"]\n semantic = et_element.get(\"SEMANTIC\")\n key_label = et_element.findtext(\"KEY-LABEL\")\n struct_label = et_element.findtext(\"STRUCT-LABEL\")\n admin_data = AdminData.from_et(et_element.find(\"ADMIN-DATA\"), doc_frags)\n key_dop_ref = OdxLinkRef.from_et(et_element.find(\"KEY-DOP-REF\"), doc_frags)\n\n table_rows_raw: List[Union[OdxLinkRef, TableRow]] = []\n for sub_elem in et_element:\n if sub_elem.tag == \"TABLE-ROW\":\n table_rows_raw.append(\n TableRow.from_et(sub_elem, doc_frags, table_ref=OdxLinkRef.from_id(odx_id)))\n elif sub_elem.tag == \"TABLE-ROW-REF\":\n table_rows_raw.append(OdxLinkRef.from_et(sub_elem, doc_frags))\n\n sdgs = create_sdgs_from_et(et_element.find(\"SDGS\"), doc_frags)\n\n return Table(\n semantic=semantic,\n key_label=key_label,\n struct_label=struct_label,\n admin_data=admin_data,\n key_dop_ref=key_dop_ref,\n table_rows_raw=table_rows_raw,\n sdgs=sdgs,\n **kwargs)\n\n @property\n def key_dop(self) -> Optional[DataObjectProperty]:\n \"\"\"The key data object property associated with this table.\"\"\"\n return self._key_dop\n\n @property\n def table_rows(self) -> NamedItemList[TableRow]:\n \"\"\"The table rows (both local and referenced) in this table.\"\"\"\n return self._table_rows\n\n def _build_odxlinks(self) -> Dict[OdxLinkId, Any]:\n result = {self.odx_id: self}\n\n for table_row_wrapper in self.table_rows_raw:\n if isinstance(table_row_wrapper, TableRow):\n result.update(table_row_wrapper._build_odxlinks())\n\n return result\n\n def _resolve_odxlinks(self, odxlinks: OdxLinkDatabase) -> None:\n self._key_dop: Optional[DataObjectProperty] = None\n if self.key_dop_ref is not None:\n self._key_dop = odxlinks.resolve(self.key_dop_ref, DataObjectProperty)\n\n table_rows = []\n for table_row_wrapper in self.table_rows_raw:\n if isinstance(table_row_wrapper, TableRow):\n table_row = table_row_wrapper\n table_row._resolve_odxlinks(odxlinks)\n else:\n odxassert(isinstance(table_row_wrapper, OdxLinkRef))\n table_row = odxlinks.resolve(table_row_wrapper, TableRow)\n\n table_rows.append(table_row)\n\n self._table_rows = NamedItemList(table_rows)\n\n def _resolve_snrefs(self, diag_layer: \"DiagLayer\") -> None:\n for table_row_wrapper in self.table_rows_raw:\n if isinstance(table_row_wrapper, TableRow):\n 
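# presumably only inline TABLE-ROWs need this step; referenced rows were already resolved via their defining table\n                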
table_row_wrapper._resolve_snrefs(diag_layer)\n","repo_name":"mercedes-benz/odxtools","sub_path":"odxtools/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"81"} +{"seq_id":"22318942939","text":"import logging\nimport requests\nfrom telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport re\n#import telepot\n#from flask import Flask\nimport os\nPORT = int(os.environ.get('PORT', 5000))\n# import daemon #supposed to run the bot without running the file\n\n\n# from telegram_bot import TelegramBot\ntoken = '1192184755:AAFkF7Vp3HYmMUioW-V9Ny1bFunwZep2-Os'\n#TelegramBot = telepot.Bot(token)\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\n\nurl = \"https://api.telegram.org/bot1192184755:AAFkF7Vp3HYmMUioW-V9Ny1bFunwZep2-Os\"\n\n# create func that get chat id\n\n\ndef start(update, context):\n #reply_keyboard = [['/help', '/joke', '/enter', '/status']]\n update.message.reply_text(\"Hello! Welcome To Railway! How can I help?\")\n\n\ndef help(update, context):\n\n update.message.reply_text(\n \"You can contact us @whysocereals or @ernestlim8 for any queries! \")\n\n\ndef enter(update, context):\n update.message.reply_text(\n \"Here's the link to the webpage! https://railway-platform.herokuapp.com/\")\n\n\ndef echo(update, context):\n reply_keyboard = [['/help', '/status', '/enter']]\n update.message.reply_text(\"Oh no! Choose a valid command!\",\n reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\n\n\ndef status(update, context):\n update.message.reply_text(\n \"Your current responses is: \"\n )\n\n\ndef get_url():\n\n contents = requests.get('https://random.dog/woof.json').json()\n url = contents['url']\n return url\n\n # def get_image_url():\n # allowed_extension = ['jpg', 'jpeg', 'png']\n # file_extension = ''\n # while file_extension not in allowed_extension:\n # url = get_url()\n # file_extension = re.search(\"([^.]*)$\", url).group(1).lower()\n # return url\n\n\ndef bop(bot, update):\n # url = https://specials-images.forbesimg.com/imageserve/1143890227/960x0.jpg?fit=scale\n chat_id = update.message.chat_id\n bot.send_photo(\n chat_id=chat_id, photo=url)\n\n\ndef get_joke(update, context):\n \"\"\"Fetch joke from the web and return.\"\"\"\n url = 'https://icanhazdadjoke.com/'\n headers = {'Accept': 'application/json'}\n joke_msg = requests.get(url, headers=headers).json().get('joke')\n update.message.reply_text(joke_msg)\n\n\ndef main():\n \"\"\"Start the bot.\"\"\"\n # Create the Updater and pass it your bot's token.\n # Make sure to set use_context=True to use the new context based callbacks\n # Post version 12 this will no longer be necessary\n updater = Updater(\n \"1192184755:AAFkF7Vp3HYmMUioW-V9Ny1bFunwZep2-Os\", use_context=True)\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # on different commands - answer in Telegram\n dp.add_handler(CommandHandler(\"start\", start))\n\n dp.add_handler(CommandHandler(\"joke\", get_joke))\n dp.add_handler(CommandHandler(\"help\", help))\n dp.add_handler(CommandHandler(\"enter\", enter))\n dp.add_handler(CommandHandler(\"status\", status))\n # dp.add_handler(CommandHandler(\"bop\", bop)) # spoilt function\n dp.add_handler(MessageHandler(Filters.text, echo))\n # Start the Bot\n 
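# note: webhook mode is used below (it suits the Heroku deployment named in the URL); for a local run, updater.start_polling() is the usual alternative, as the trailing comment mentions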
updater.start_webhook(listen=\"0.0.0.0\",\n port=int(PORT),\n url_path='1192184755:AAFkF7Vp3HYmMUioW-V9Ny1bFunwZep2-Os')\n\n updater.bot.setWebhook('https://mysterious-wildwood-07373.herokuapp.com/' +\n '1192184755:AAFkF7Vp3HYmMUioW-V9Ny1bFunwZep2-Os')\n\n# Run the bot until you press Ctrl-C or the process receives SIGINT,\n# SIGTERM or SIGABRT. This should be used most of the time, since\n# start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"braydenlong/railwaybot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7186711089","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\nprint = sys.stdout.write\n\nn = int(input())\nnear_list = [[] for _ in range(n + 1)]\nfor _ in range(n - 1):\n a, b = map(int, input().split())\n near_list[a].append(b)\n near_list[b].append(a)\ndepth_node = [[] for _ in range(n + 1)]\n\ndef BFS(x):\n visited = [False] * (n + 1)\n q = deque()\n q.append(x)\n visited[x] = True\n while q:\n now = q.popleft()\n for next in near_list[now]:\n if not visited[next]:\n visited[next]= True\n depth_node[next] = [next, now] + depth_node[now]\n q.append(next)\nBFS(1)\nm = int(input())\nfor _ in range(m):\n a, b = map(int, input().split())\n if len(depth_node[a]) > len(depth_node[b]):\n a,b= b,a\n a_parents = deque(depth_node[a])\n b_parents = deque(depth_node[b])\n while a!= b:\n if len(b_parents) >len(a_parents):\n b = b_parents.popleft()\n else:\n a = a_parents.popleft()\n b = b_parents.popleft()\n print(str(a)+\"\\n\")","repo_name":"johnny9696/baekjoon_problem","sub_path":"10000~end/11000~11999/11438.py","file_name":"11438.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20492338881","text":"\n\ndef dependency_dictionary(dataset, latest=0):\n \"\"\"Creates a dictionary of all the different dependencies in the dataset where every\n dependency contains a list of the packages where this dependency is called\"\"\"\n dep_dict = {}\n\n for package in dataset.values():\n if latest == 0:\n for version in package:\n _dict_add(version, dep_dict, version.name)\n else:\n _dict_add(package[-1], dep_dict, package[-1].name) #Most recent version of packages\n\n return dep_dict\n\n\ndef _dict_add(node, dict, package_name):\n \"\"\"Help function for generating dependency dictionary\"\"\"\n if node.name in dict.keys():\n if package_name not in dict[node.name]:\n dict[node.name].append(package_name)\n else:\n dict[node.name] = [package_name]\n\n if node.dependencies != []:\n for dependency in node.dependencies:\n _dict_add(dependency, dict, package_name)\n\n\ndef dep_dict_stats(dataset, latest=0):\n \"\"\"returns information on dependencies\"\"\"\n dep_dict = dependency_dictionary(dataset, latest)\n number_of_packages = []\n for key in dep_dict.keys():\n number_of_packages.append([len(dep_dict[key]), key])\n\n number_of_packages.sort()\n\n return number_of_packages #10 most used dependencies\n\n\ndef dependency_dictionary_with_versions(dataset, latest=0):\n dep_dict = {}\n for package in dataset.values():\n if latest==0:\n for version in package:\n _dict_add_versions(version, dep_dict, version.name, version.version, depth=0)\n else:\n _dict_add_versions(package[-1], dep_dict, package[-1].name, package[-1].version, depth=0)\n return 
dep_dict\n\n\ndef _dict_add_versions(node, dictionary, package_name, package_version, depth):\n if node.name in dictionary.keys():\n if package_name in dictionary[node.name].keys():\n if package_version in dictionary[node.name][package_name].keys():\n if dictionary[node.name][package_name][package_version] > depth:\n dictionary[node.name][package_name][package_version] = depth\n else:\n dictionary[node.name][package_name][package_version] = depth\n\n else:\n dictionary[node.name][package_name] = {package_version:depth}\n else:\n dictionary[node.name] = {package_name:{package_version:depth}}\n\n if node.dependencies != []:\n for dependency in node.dependencies:\n _dict_add_versions(dependency, dictionary, package_name, package_version, depth+1)\n\n\ndef dep_dict_stats_versions(dataset, simplify=0):\n dict = dependency_dictionary_with_versions(dataset)\n most_popular_list = []\n for info in dep_dict_stats(dataset):\n key = info[1]\n most_popular_list.append(key)\n\n most_popular_dict = {key: dict[key] for key in most_popular_list}\n\n if simplify == 1:\n for unique_dependency in most_popular_dict.keys():\n for unique_package in most_popular_dict[unique_dependency].keys():\n #total_number_of_versions = #Want to find what percentage of major/minor/patch bump versions the dependency appears in\n most_popular_dict[unique_dependency][unique_package] = len(most_popular_dict[unique_dependency][unique_package].keys())\n\n return most_popular_dict\n\n\ndef total_number_of_deps(dataset):\n \"\"\"Returns list of all dependencies in dataset\"\"\"\n list_of_unique_dependencies = []\n for package in dataset.values():\n for version in package:\n _dfs_name_search(version, list_of_unique_dependencies)\n\n print(len(list_of_unique_dependencies)) #2008 unique dependencies (including 100 packages)\n return(list_of_unique_dependencies)\n\n\ndef _dfs_name_search(node, list):\n \"\"\"Help function for deps list above\"\"\"\n if node.name not in list:\n list.append(node.name)\n if node.dependencies != []:\n for dependency in node.dependencies:\n _dfs_name_search(dependency, list)\n\n\ndef print_tree(dataset):\n dep_list = []\n\n for package in dataset.values():\n if package[0].name == 'handlebars':\n for version in package:\n if version.version == '4.7.7':\n dep_list_copy = []\n _print_tree_help(version, dep_list_copy, 0)\n version.print()\n dep_list_copy.append(version.version)\n '''if len(dep_list) > len(dep_list_copy):\n dep_list_copy = dep_list.copy()\n dep_list = []'''\n\n\n #dep_list_copy.sort()\n print(dep_list_copy)\n print(len(dep_list_copy))\n return\n\n\ndef _print_tree_help(node, list, depth):\n list.append([node.name, depth])\n if node.dependencies != []:\n for dependency in node.dependencies:\n _print_tree_help(dependency, list, depth+1)","repo_name":"simon-frisk/Bsc-Thesis","sub_path":"analyze/find_common_deps.py","file_name":"find_common_deps.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12559932204","text":"import tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nheight = 28\nwidth = 28\nchannels = 1\n\nn_inputs = height * width\nn_hidden1 = 300\nn_hidden2 = 100\nn_hidden3 = n_hidden1\nn_outputs = n_inputs\n\nX = tf.placeholder(tf.float32, shape = [None, n_inputs], name = \"X\")\n\nhe_init = tf.contrib.layers.variance_scaling_initializer()\nl2_regularizer = tf.contrib.layers.l2_regularizer(0.001)\n\nhidden1 = tf.layers.dense(X, n_hidden1, 
activation=tf.nn.elu, kernel_initializer=he_init, kernel_regularizer=l2_regularizer)\nhidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.elu, kernel_initializer=he_init, kernel_regularizer=l2_regularizer)\nhidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.elu, kernel_initializer=he_init, kernel_regularizer=l2_regularizer)\noutput = tf.layers.dense(hidden3, n_outputs, activation=tf.nn.elu, kernel_initializer=he_init, kernel_regularizer=l2_regularizer)\n\nreconstruction_loss = tf.reduce_mean(tf.square(output-X))\n\nreg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\nloss = tf.add_n([reconstruction_loss]+reg_loss)\n\noptimiser = tf.train.AdamOptimizer(learning_rate=0.01)\ntraining = optimiser.minimize(loss)\n\nrec_loss_summary = tf.summary.scalar(\"Reconstruction\", reconstruction_loss)\nloss_summary = tf.summary.scalar(\"Loss\", loss)\nfile_writer = tf.summary.FileWriter(\"./saves/summary/modele1/\", tf.get_default_graph())\n\nsaver = tf.train.Saver()\n\ninit = tf.global_variables_initializer()\n\nnb_epoch = 15\nbatch_size = 150\n\nmnist = input_data.read_data_sets(\"/data/\")\n\nwith tf.Session() as sess:\n    init.run()\n    for epoch in range(nb_epoch):\n        nb_batch = mnist.train.num_examples // batch_size\n        for iteration in range(nb_batch):\n            X_batch, y_batch = mnist.train.next_batch(batch_size)\n            sess.run(training, feed_dict={X:X_batch})\n            if iteration%10 == 0:\n                loss_str = loss_summary.eval(feed_dict={X: mnist.test.images})\n                rec_loss_str = rec_loss_summary.eval(feed_dict={X: mnist.test.images})\n                file_writer.add_summary(loss_str, epoch)\n                file_writer.add_summary(rec_loss_str, epoch)\n                saver.save(sess, \"./saves/modele1_{}_{}.ckpt\".format(epoch, iteration))\n","repo_name":"Coni63/scripts_Python","sub_path":"machine_learning/Neural Net/autoencoder/MNIST/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}\n{"seq_id":"12682168495","text":"# Loop control statements 3 - formative assessment 6 #874\n\n# Read a natural number n and print the pattern shown in each problem's sample output.\n\nipt = int(input())\n\nif ipt == 1:\n    print(1)\nelse:\n    # the i loop sets the number of rows\n    for i in range(1, ipt+1):\n        # the j loop prints the leading blanks\n        for j in range(ipt-i, 0, -1):\n            print(' ', end='')\n        # the k loop prints the numbers, in increasing order \n        for k in range(1, i+1):\n            print(k, end=' ')\n        print()","repo_name":"ssafy9june/algorithm","sub_path":"JUNGOL/LCoder_Python/10. 
반복제어문3/874.py","file_name":"874.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24820559546","text":"from atlantisbot_api.models import DiscordUser\nimport discord\n\nfrom bot.utils.context import Context\n\n\nasync def is_admin(ctx: Context):\n atlantis: discord.Guild = ctx.bot.get_guild(ctx.setting.server_id)\n member: discord.Member = atlantis.get_member(ctx.author.id)\n\n admin_roles = [\"coord_discord\", \"org_discord\", \"adm_discord\"]\n roles = [\n atlantis.get_role(ctx.setting.admin_roles().get(role)) for role in admin_roles\n ]\n\n has_admin = any(role in member.roles for role in roles)\n ctx.bot.logger.info(f\"[Check is_admin] {member} -> {has_admin}\")\n\n return has_admin\n\n\nasync def is_authenticated(ctx: Context):\n \"\"\"\n Checks if the user running the command is authenticated or not\n \"\"\"\n user = DiscordUser.objects.filter(discord_id=str(ctx.author.id)).first()\n\n if not user or user.disabled:\n await ctx.send(\n f\"Você precisa estar autenticado para usar esse comando. Autentique-se enviando o comando\"\n f\"**`!membro`** para mim aqui: {ctx.bot.user.mention}\"\n )\n ctx.bot.logger.info(\n f\"[Check is_authenticated] {user} -> Disabled or non-existent\"\n )\n return False\n if user.warning_date:\n await ctx.send(\n f\"Você não pode usar esse comando atualmente por ter recebido um Aviso para \"\n f\"se re-autenticar, já que mudou de nome recentemente, ou saiu do clã.\\n\\n\"\n f\"Você pode se re-autenticar enviando o comando **`!membro`** para mim aqui: {ctx.bot.user.mention}\"\n )\n ctx.bot.logger.info(f\"[Check is_authenticated] {user} -> Warning date\")\n return False\n\n ctx.bot.logger.info(f\"[Check is_authenticated] {user} -> True\")\n return True\n","repo_name":"johnvictorfs/atlantisbot","sub_path":"bot/utils/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"pt","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"11495367396","text":"#! 
/usr/bin/python3\n# Project Euler 12 - find the first triangle number with more than 500 divisors\ntri_nb = 0 # triangle number\ncounter = 1\ndividers = 1 # number of divisors\nfrom math import sqrt\nwhile dividers < 500:\n    dividers = 1\n    prime_factors = [] # list of prime factors\n    tri_nb = tri_nb + counter\n    counter = counter + 1\n    # dzl = 1\n    # x = 0\n    k = 2\n    # print('\\n',tri_nb)\n    '''while dzl <= tri_nb:\n        if tri_nb % dzl == 0:\n            # print(dzl, end=\" \")\n            x += 1\n            dzl += 1\n        else:\n            dzl += 1\nprint('\\nThe smallest triangle number which has above 50 dividers is:', tri_nb)''' # this brute-force version counts correctly but is far too slow\n    # print(tri_nb)\n    tri_nb2 = tri_nb # tri_nb2 is a working copy of tri_nb used for factorization\n    while tri_nb2 > 1 and k <= sqrt(tri_nb2):\n        while tri_nb2 % k == 0:\n            prime_factors.append(k)\n            tri_nb2 = tri_nb2 / k\n        k += 1\n    if tri_nb2 > 1:\n        prime_factors.append(tri_nb2)\n    for x in prime_factors:\n        if prime_factors.count(x) > 1:\n            dividers = dividers * (prime_factors.count(x) + 1)\n            del prime_factors[0: prime_factors.count(x) - 1]\n        else:\n            dividers = dividers * (prime_factors.count(x) + 1)\n            # del prime_factors[0]\nprint(tri_nb, dividers)","repo_name":"jedrzejmatuszak/Project-Euler","sub_path":"PE12 - Highly divisible triangular number.py","file_name":"PE12 - Highly divisible triangular number.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}\n{"seq_id":"4505036638","text":"url = '' #INSERT YOUR FTDI URL HERE\n\nimport os\nimport pyftdi.serialext\nimport math\n\ncount = 0\nbrate = 230400\nport = pyftdi.serialext.serial_for_url(url,\n                                       baudrate=brate,\n                                       bytesize=8,\n                                       stopbits=1,\n                                       parity='N',\n                                       xonxoff=False,\n                                       rtscts=False)\n\n\ndef cls():\n    os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main_menu():\n    cls()\n    print(\"============ MAIN MENU ============\")\n    print(\"|| Choose generator properties: ||\")\n    print(\"|| (1) - waveform ||\")\n    print(\"|| (2) - amplitude ||\")\n    print(\"|| (3) - frequency ||\")\n    print(\"|| (4) - turn current off ||\")\n    print(\"|| (0) Exit ||\")\n    try:\n        ex = int(input())\n    except:\n        ex = 5\n    return ex\n\n\ndef match_set(choice: int):\n    if choice == 1:\n        choice1()\n        os.system(\"pause\")\n        return True\n    elif choice == 2:\n        choice2()\n        os.system(\"pause\")\n        return True\n    elif choice == 3:\n        choice3()\n        os.system(\"pause\")\n        return True\n    elif choice == 4:\n        choice4()\n        os.system(\"pause\")\n        return True\n    elif choice == 0:\n        return False\n    else:\n        print('Unexpected choice')\n        return True\n\n\ndef choice1():\n    state = True\n    while state:\n        cls()\n        print(\"================= Waveform =====================\")\n        print(\"|| Choose which waveform you want to create:\")\n        print(\"|| (1) saw\")\n        print(\"|| (2) triangle\")\n        print(\"|| (3) sinusoidal\")\n        print(\"|| (4) exponential\")\n        print(\"|| (0) Exit \")\n        try:\n            ex = int(input())\n        except:\n            print(\"Insert int value\")\n            ex = 0\n        if ex == 1:\n            b = bytes([0b10000000])\n            print(\"-\", b)\n            port.write(b)\n        elif ex == 2:\n            b = bytes([0b10000001])\n            print(\"-\", b)\n            port.write(b)\n        elif ex == 3:\n            b = bytes([0b10000010])\n            print(\"-\", b)\n            port.write(b)\n        elif ex == 4:\n            b = bytes([0b10000011])\n            print(\"-\", b)\n            port.write(b)\n        else:\n            state = False\n\n\ndef choice2():\n    cls()\n    print(\"============== amplitude ==================\")\n    print(\"|| Choose value [0.2, 3.3]V\")\n    try:\n        ex = float(input())\n    except:\n        print(\"Insert float value (ex. 
1.0)\")\n ex = 0.0\n while ex < 0.2 or ex > 3.3:\n print (\"Wrong value\")\n try:\n ex = float(input())\n except:\n print(\"Insert float value (ex. 1.0)\")\n ex = 0.0\n v = math.ceil (ex / 0.05) - 3\n b = bytes([192 + v])\n print(\"-\", b)\n port.write(b)\n\n\ndef choice3():\n cls()\n print(\"============== frequency ==================\")\n print(\"|| Chose value [32, 2000]Hz\")\n try:\n ex = float(input())\n except:\n print(\"Insert float value (ex. 1.0)\")\n ex = 0.0\n while ex < 32 or ex > 2000:\n print (\"Wrong value\")\n try:\n ex = float(input())\n except:\n print(\"Insert float value (ex. 1.0)\")\n ex = 0.0\n v = math.ceil (2000 / ex)\n b = bytes([64 + v])\n print(\"-\", b)\n port.write(b)\n\n\ndef choice4():\n cls()\n b = bytes([0b00111100])\n print(\"-\", b)\n port.write(b)\n\n\ndef main():\n try:\n is_running = True\n\n while is_running:\n choice = main_menu()\n is_running = match_set(choice)\n except Exception as e:\n print(e)\n os.system(\"pause\")\n return 0\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PiotrDeda/zedboard-signal-generator","sub_path":"cmd_uart.py","file_name":"cmd_uart.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4206408976","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 13 15:50:08 2019\n\n@author: alepe\n\"\"\"\nfrom tkinter import *\ndef affichageAQuiLeTour(joueur): #on rentre le nom du joueur qui doit jouer, le but est de faire une coupure d'écran entre chaque joueur pour pas que l'on voit le jeu de nos adversaires\n \n fenetre = Tk()\n champ_label = Label(fenetre, text=\"c'est au tour de \"+joueur)\n champ_label.pack()\n fenetre.mainloop() \njoueur='roger'","repo_name":"gheritarish/PAPPL","sub_path":"AffichageAQuiLeTour.py","file_name":"AffichageAQuiLeTour.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19982675162","text":"import cx_Freeze\nimport sys\nimport os\n\n\nPYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))\nos.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')\nos.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')\n\n\nprint(os.environ['TCL_LIBRARY'],os.environ['TK_LIBRARY'])\n\nbase = None\n\nif sys.platform == 'win32':\n base = 'Win32GUI'\nsyspath = r\"C:\\Python\\DLLs\"\nbuildOptions = dict(\n packages=[\"tkinter\",\"pandas\",\"numpy\"],\n excludes=[],\n include_files=[('tcl86t.dll', os.path.join('lib', 'tcl86t.dll')),('tk86t.dll', os.path.join('lib', 'tk86t.dll'))]\n)\nexecutables = [cx_Freeze.Executable(\"[Gems of Atlantis].py\", base=base, icon=\"hitek.ico\")]\ncx_Freeze.setup(\n name = \"Gems of Atlantis\",\n options = dict(build_exe=buildOptions),\n version = \"0.02\",\n description = \"Gems of Atlantis\",\n executables = executables\n)\n","repo_name":"gustavoalmeidadasilva/Gems-of-Atlantis","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25080946260","text":"from .function_directory import FunctionDirectory\nfrom .semantic_cube import SemanticCube\nfrom .memory import Memory\n\nclass Program():\n \"\"\"A class that represents the program\"\"\"\n\n def __init__(self, global_scope = \"\", current_scope = \"\"):\n \"\"\"Class constructor\"\"\"\n self.global_scope = global_scope\n 
self.current_scope = current_scope\n self.function_directory = FunctionDirectory()\n self.semantic_cube = SemanticCube()\n self.memory = Memory()\n self.temporal_variables = []\n self.temporal_parameters_names = []\n self.temporal_parameters_types = []\n self.temporal_arguments_types = []\n self.operand_stack = []\n self.type_stack = []\n self.operator_stack = []\n self.quadruple_list = []\n self.jump_list = []\n self.return_list = []\n self.temporal_variable_counter = 0\n self.quadruple_number = 1\n self.relational_operations = ['>', '<', '>=', '<=', '==', '!=']\n self.return_flag = False\n self.current_dimensioned_varible = {}\n self.dimensioned_varible_stack = []\n self.dimensioned_varible_flag = False\n self.negation_stack = []\n\n def print_stacks(self):\n \"\"\"Print the temporal stacks of the program\"\"\"\n print(self.operand_stack)\n print(self.type_stack)\n print(self.operator_stack)\n\n def print_quadruples(self):\n \"\"\"Print the list of quadruples\"\"\"\n for quadruple in self.quadruple_list:\n print(quadruple)\n","repo_name":"ar4mro/mojo-compiler","sub_path":"helpers/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1215519025","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\nclass Standard():\n \"\"\"\n Implement standard EM in HIV infection analysis \\n\n Args:\n initial_params: (alpha_0, beta_0, mu_0, lambda_0) \\n\n epsilon: Threshold of the stopping rule \\n\n max_iter: Max iteration. Default: 100\n \"\"\"\n \n def __init__(self, initial_params, epsilon, max_iter = 100):\n self.alpha = initial_params[0]\n self.beta = initial_params[1]\n self.mu = initial_params[2]\n self.Lambda = initial_params[3]\n self.epsilon = epsilon\n self.max_iter = max_iter\n self.n_i = np.array([379, 299, 222, 145, 109, 95, 73, 59, 45, \n 30, 24, 12, 4, 2, 0, 1, 1])\n self.N = np.sum(self.n_i)\n \n def basic_relations(self, alpha, beta, mu, Lambda):\n \"\"\"\n Calculate pi_i, z_0, t_i, p_i\n \"\"\"\n pi_0 = alpha + beta * np.exp(-mu) + (1 - alpha - beta) * np.exp(-Lambda)\n index = np.arange(1, 17)\n pi_i = beta * (mu ** index) * np.exp(-mu) + (1 - alpha - beta) * (Lambda ** index) * np.exp(-Lambda)\n pi_i_c = np.concatenate((np.expand_dims(pi_0, axis = 0), pi_i))\n z_0 = alpha / pi_0\n index_c = np.arange(17)\n t_i = (beta * (mu ** index_c) * np.exp(-mu)) / pi_i_c\n p_i = (1 - alpha - beta) * (Lambda ** index_c) * np.exp(-Lambda) / pi_i_c\n return pi_i_c, z_0, t_i, p_i\n \n def update_equation(self, alpha, beta, mu, Lambda):\n \"\"\"\n Calculate alpha^{(t+1)}, beta^{(t+1)}, mu^{(t+1)}, lambda^{(t+1)}\n \"\"\"\n pi_i, z_0, t_i, p_i = self.basic_relations(alpha, beta, mu, Lambda)\n i = np.arange(17)\n alpha_up = (self.n_i[0] * z_0) / self.N\n beta_up = np.sum(self.n_i * t_i) / self.N\n mu_up = np.sum(i * self.n_i * t_i) / np.sum(self.n_i * t_i)\n Lambda_up = np.sum(i * self.n_i * p_i) / np.sum(self.n_i * p_i)\n return alpha_up, beta_up, mu_up, Lambda_up\n\n def stopping_rule(self, params_current, params_previous):\n \"\"\"\n Calculate R^{(t)}\n \"\"\"\n return np.sqrt(np.sum((params_current - params_previous) ** 2)) / np.sqrt(np.sum(params_previous ** 2))\n\n def em(self):\n \"\"\"\n Standard EM \\n\n Return:\n alpha_history, beta_history, mu_history, lambda_history, R_history, iteration\n \"\"\"\n # Initiate\n alpha = self.alpha\n beta = self.beta\n mu = self.mu\n Lambda = self.Lambda\n alpha_t_sub_1 = alpha - self.epsilon - 1\n beta_t_sub_1 = beta 
- self.epsilon - 1\n mu_t_sub_1 = mu - self.epsilon - 1\n Lambda_t_sub_1 = Lambda - self.epsilon - 1\n R = self.stopping_rule(np.array([alpha, beta, mu, Lambda]), \n np.array([alpha_t_sub_1, beta_t_sub_1, mu_t_sub_1, Lambda_t_sub_1]))\n alpha_history = []\n beta_history = []\n mu_history = []\n lambda_history = []\n R_history = []\n alpha_history.append(alpha)\n beta_history.append(beta)\n mu_history.append(mu)\n lambda_history.append(Lambda)\n R_history.append(R)\n iteration = 0\n \n # Main logic\n while R > self.epsilon and iteration < self.max_iter:\n alpha_t_sub_1 = alpha\n beta_t_sub_1 = beta\n mu_t_sub_1 = mu\n Lambda_t_sub_1 = Lambda\n alpha, beta, mu, Lambda = self.update_equation(alpha_t_sub_1, beta_t_sub_1, \n mu_t_sub_1, Lambda_t_sub_1)\n R = self.stopping_rule(np.array([alpha, beta, mu, Lambda]), \n np.array([alpha_t_sub_1, beta_t_sub_1, mu_t_sub_1, Lambda_t_sub_1]))\n alpha_history.append(alpha)\n beta_history.append(beta)\n mu_history.append(mu)\n lambda_history.append(Lambda)\n R_history.append(R) \n iteration += 1\n \n return alpha_history, beta_history, mu_history, lambda_history, R_history, iteration","repo_name":"ShupeiLi/statistical-computing","sub_path":"Chapter4/EM/HIV/Standard.py","file_name":"Standard.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73595782025","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 8 11:46:40 2019\n\n@author: jarvis\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('Churn_Modelling.csv')\nX = df.iloc[:,3:13].values\nY = df.iloc[:,13].values\n\nfrom sklearn.preprocessing import LabelEncoder , OneHotEncoder\nlabel_x_1 = LabelEncoder()\nX[:,1] = label_x_1.fit_transform(X[:,1])\nlabel_x_2 = LabelEncoder()\nX[:,2] = label_x_2.fit_transform(X[:,2])\noneHotEnc = OneHotEncoder(categorical_features=[1])\nX = oneHotEnc.fit_transform(X).toarray()\nX = X[:,1:]\n\nfrom sklearn.model_selection import train_test_split\nX_train , X_test , Y_train , Y_test = train_test_split(X,Y,test_size=0.20,random_state=0) \n\n#Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n#Make ANN\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nclassifier = Sequential()\n#Input layer and First hidden layer\nclassifier.add(Dense(6 , kernel_initializer='uniform',activation='relu',input_shape=(11,)))\n#Implementing Dropout by disabling 10% of neurons Increasing rate when overfiting doesn't solve\nclassifier.add(Dropout(rate = 0.1))\n#Hidden layer\nclassifier.add(Dense(6,kernel_initializer='uniform',activation='relu'))\n#Implementing Dropout by disabling 10% of neurons Increasing rate when overfiting doesn't solve\nclassifier.add(Dropout(rate = 0.1))\n#Output layer\nclassifier.add(Dense(1,kernel_initializer='uniform',activation='sigmoid'))\n#Compiling ANN\nclassifier.compile(optimizer= 'adam' , loss='binary_crossentropy', metrics=['accuracy'])\n#Fitting Training Data into ANN\nclassifier.fit(X_train,Y_train,batch_size=10,epochs=100)\n\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\n\n#Custom Prediction\nnew_pred = classifier.predict(sc.transform(np.array([[0,0,600,1,40,3,60000,2,1,1,50000]])))\nnew_pred = (new_pred>0.5)\n\n#Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = 
confusion_matrix(Y_test , y_pred)\n\n#Evaluating ANN\n#Applying K-Fold Cross Validation\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\ndef build_classifier():\n classifier = Sequential()\n classifier.add(Dense(6 , kernel_initializer='uniform',activation='relu',input_shape=(11,)))\n classifier.add(Dense(6,kernel_initializer='uniform',activation='relu'))\n classifier.add(Dense(1,kernel_initializer='uniform',activation='sigmoid'))\n classifier.compile(optimizer= 'adam' , loss='binary_crossentropy', metrics=['accuracy'])\n return classifier\n\nkeras_classifier = KerasClassifier(build_fn=build_classifier , batch_size = 10 , epochs=100)\naccuracies = cross_val_score(estimator= keras_classifier , X = X_train , y = Y_train , cv = 10 , n_jobs=-1)\n\nmean = accuracies.mean()\nvarience = accuracies.std()\n\n#Improving Accuracy\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\ndef build_classifier(optimizer):\n classifier = Sequential()\n classifier.add(Dense(6 , kernel_initializer='uniform',activation='relu',input_shape=(11,)))\n classifier.add(Dense(6,kernel_initializer='uniform',activation='relu'))\n classifier.add(Dense(1,kernel_initializer='uniform',activation='sigmoid'))\n classifier.compile(optimizer = optimizer , loss='binary_crossentropy', metrics=['accuracy'])\n return classifier\n\nkeras_classifier = KerasClassifier(build_fn=build_classifier)\nparameters = {'batch_size':[25 , 32],\n 'epochs':[100 , 500],\n 'optimizer':['adam' , 'rmsprop']}\ngrid_search = GridSearchCV(estimator = keras_classifier ,\n param_grid = parameters,\n scoring = 'accuracy',\n cv = 10)\ngrid_search = grid_search.fit(X_train , Y_train)\nbest_parameters = grid_search.best_params_\nbest_accuracy = grid_search.best_score_\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ishaan112233/Deep_Learning_Practice","sub_path":"Artificial_Neural_Network/ANN.py","file_name":"ANN.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37805219225","text":"import random\nimport requests\nimport time\nfrom bs4 import BeautifulSoup, SoupStrainer\n\nclass Markov(object):\n\t\n\tdef __init__(self, open_file):\n\t\tself.cache = {}\n\t\tself.open_file = open_file\n\t\tself.words = self.file_to_words()\n\t\tself.word_size = len(self.words)\n\t\tself.database()\n\t\t\n\t\n\tdef file_to_words(self):\n\t\tself.open_file.seek(0)\n\t\tdata = self.open_file.read()\n\t\twords = data.split()\n\t\treturn words\n\t\t\n\t\n\tdef triples(self):\n\t\t\"\"\" Generates triples from the given data string. 
So if our string were\n\t\t\t\t\"What a lovely day\", we'd generate (What, a, lovely) and then\n\t\t\t\t(a, lovely, day).\n\t\t\"\"\"\n\t\t\n\t\tif len(self.words) < 3:\n\t\t\treturn\n\t\t\n\t\tfor i in range(len(self.words) - 2):\n\t\t\tyield (self.words[i], self.words[i+1], self.words[i+2])\n\t\t\t\n\tdef database(self):\n\t\tfor w1, w2, w3 in self.triples():\n\t\t\tkey = (w1, w2)\n\t\t\tif key in self.cache:\n\t\t\t\tself.cache[key].append(w3)\n\t\t\telse:\n\t\t\t\tself.cache[key] = [w3]\n\t\t\t\t\n\tdef generate_markov_text(self, size=25):\n\t\tseed = random.randint(0, self.word_size-3)\n\t\tseed_word, next_word = self.words[seed], self.words[seed+1]\n\t\tw1, w2 = seed_word, next_word\n\t\tgen_words = []\n\t\tfor i in range(size):\n\t\t\tgen_words.append(w1)\n\t\t\tw1, w2 = w2, random.choice(self.cache[(w1, w2)])\n\t\tgen_words.append(w2)\n\t\treturn ' '.join(gen_words)\n\n\ndef get_book_text(url):\n\tcount = 0\n\treq = requests.get(url)\n\tsoup = BeautifulSoup(req.text, 'html.parser')\n\tpars = soup.find_all('p')\n\t\n\twith open(str(count) + \".txt\", 'w') as f:\n\t\tfor par in pars:\n\t\t\tif par.string:\n\t\t\t\tf.write(par.string)\n\t\t\tcount += 1\n\ndef get_urls():\n\turls = []\n\tbase_url = 'https://www.gutenberg.org'\n\ttop_100_url = 'https://www.gutenberg.org/browse/scores/top'\n\n\treq = requests.get(top_100_url)\n\tsoup = BeautifulSoup(req.text, 'html.parser', parse_only=SoupStrainer('a', href=True))\n\tfor link in soup.find_all('a')[19:119]:\n\t\turls.append(base_url + link['href'])\n\n\treturn urls\n\ndef write_file(data, filename):\n\twith open(filename, 'w') as f:\n\t\tf.write(data)\n\ndef get_final_urls(urls):\n\tfinal_urls = []\n\n\tfor url in urls:\n\t\ttime.sleep(5)\n\t\treq = requests.get(url)\n\t\tsoup = BeautifulSoup(req.text, 'html.parser', parse_only=SoupStrainer('a', href=True))\n\n\t\tfor x in soup:\n\t\t\tif x.string == 'Read this book online: HTML':\n\t\t\t\tfinal_urls.append('https:' + x['href'])\n\n\treturn final_urls\n\n\ndef main():\n\tbook_urls = get_final_urls(get_urls())\n\tfor url in book_urls:\n\t\ttime.sleep(5)\n\t\tget_book_text(url)\n\nmain()\n\n\n\n\n","repo_name":"CameronSima/markov","sub_path":"markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}\n{"seq_id":"40227580369","text":"#!/usr/bin/env python\n# encoding=utf-8\n# Date: 2018-09-18\n# Author: pangjian\nfrom appium.webdriver.common.touch_action import TouchAction\nimport time\n\nclass AppiumUtil():\n\n    def __init__(self, driver):\n        self.d = driver\n\n    def getSize(self):\n        x = self.d.get_window_size()['width']\n        y = self.d.get_window_size()['height']\n        return (x, y)\n\n    # swipe the screen upwards\n    def swipeUp(self):\n        self.d.implicitly_wait(30)\n        time.sleep(3)\n        l = self.getSize()\n        x1 = int(l[0] * 0.5)\n        y1 = int(l[1] * 0.85)\n        y2 = int(l[1] * 0.25)\n        self.d.swipe(x1, y1, x1, y2)\n        time.sleep(3)\n\n    # swipe the screen downwards\n    def swipeDown(self):\n        self.d.implicitly_wait(30)\n        time.sleep(3)\n        l = self.getSize()\n        x1 = int(l[0] * 0.5)\n        y1 = int(l[1] * 0.35)\n        y2 = int(l[1] * 0.85)\n        self.d.swipe(x1, y1, x1, y2)\n        time.sleep(3)\n\n    # swipe the screen to the left\n    def swipLeft(self):\n        self.d.implicitly_wait(30)\n        time.sleep(3)\n        l = self.getSize()\n        x1 = int(l[0]*0.85)\n        y1 = int(l[1]*0.15)\n        x2 = int(l[0]*0.05)\n        self.d.swipe(x1,y1,x2,y1)\n        time.sleep(3)\n\n    # swipe the screen to the right\n    def swipRight(self):\n        self.d.implicitly_wait(30)\n        time.sleep(3)\n        l = self.getSize()\n        x1 = int(l[0]*0.05)\n        y1 = int(l[1]*0.15)\n        x2 = int(l[0]*0.99)\n        self.d.swipe(x1,y1,x2,y1)\n        time.sleep(3)\n\n    def my_longpress(self, element):\n        
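# note: long_press chains an Appium TouchAction (press-and-hold on the given element); perform() commits the gesture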
action1 = TouchAction(self.d)\n        action1.long_press(element)\n        action1.perform()\n","repo_name":"crazymanpj/cli-appium-python","sub_path":"appium_util.py","file_name":"appium_util.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}\n{"seq_id":"13666025315","text":"# -*- coding: utf-8 -*-\nimport os\n\n\n#r = input(\"Enter an expression to evaluate: \")\n#print r\n\n# read the file\nr = open(\"d:/mr/stu.txt\")\nlines = r.readlines()\nfor line in lines:\n    print(line, end='')\nr.close()\n\nf = open(\"d:/mr/stu.txt\", mode=\"a+\")\nf.write(\"hello world\")\nf.close()\n\n\n\n# os.renames(\"d:/mr/wc\" ,\"d:/mr/wc1\")\n# os.rename(\"d:/mr/stus.txt\", \"d:/mr/stu.txt\")","repo_name":"LTongSpark/workplace","sub_path":"other/day1/Test3.py","file_name":"Test3.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}\n{"seq_id":"74921536905","text":"\"\"\"\nThis file is part of the TheLMA (THe Laboratory Management Application) project.\nSee LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.\n\nTube specs mapper.\n\"\"\"\nfrom sqlalchemy.orm import mapper\nfrom sqlalchemy.orm import relationship\n\nfrom thelma.entities.container import CONTAINER_SPECS_TYPES\nfrom thelma.entities.container import TubeSpecs\nfrom thelma.entities.rack import TubeRackSpecs\n\n\n__docformat__ = 'reStructuredText en'\n__all__ = ['create_mapper']\n\n\ndef create_mapper(containerspecs_mapper, rack_specs_container_specs_tbl):\n    \"Mapper factory.\"\n    rscs = rack_specs_container_specs_tbl\n    m = mapper(TubeSpecs, inherits=containerspecs_mapper,\n               properties=dict(\n                    tube_rack_specs=relationship(TubeRackSpecs, secondary=rscs,\n                                                 back_populates='tube_specs')\n                    ),\n               polymorphic_identity=CONTAINER_SPECS_TYPES.TUBE\n               )\n    return m\n","repo_name":"helixyte/TheLMA","sub_path":"thelma/repositories/rdb/mappers/tubespecs.py","file_name":"tubespecs.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}\n{"seq_id":"32463828620","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport glob\n\ntestRunIdx = 0\n\nfileNameTemplate = \"../TestOutputData/PipelinePlanner/steps_\" + \"{0:0>5}\".format(testRunIdx) + \"_*.txt\"\n\ntestFiles = glob.glob(fileNameTemplate)\n\nfor testFile in testFiles:\n    f = open(testFile)\n    lines = f.readlines()\n\n    # Find JSON\n    jsonStr = \"\"\n    lineIdx = 0\n    while lineIdx < len(lines):\n        if lines[lineIdx].strip() == \"\":\n            break\n        jsonStr += lines[lineIdx]\n        lineIdx+=1\n\n    configJson = json.loads(jsonStr)\n\n    lines = lines[lineIdx+1:]\n\n    stepDists = [configJson[\"axis0\"]['unitsPerRotation']/configJson[\"axis0\"]['stepsPerRotation'],\n                 configJson[\"axis1\"]['unitsPerRotation']/configJson[\"axis1\"]['stepsPerRotation']]\n\n    fieldCmd = 0\n    fieldUs = 1\n    fieldPin = 2\n    fieldLevel = 3\n\n    pinAxis0Step = [\"st0\"]\n    pinAxis0Dirn = [\"dr0\"]\n    pinAxis1Step = [\"st1\"]\n    pinAxis1Dirn = [\"dr1\"]\n\n    lastAxisUs = [0,0]\n    axisDirn = [1, 1]\n\n    axisDist = [[],[]]\n    axisSpeed = [[],[]]\n    axisTimes = [[],[]]\n    axisXYLastUs = 0\n    axisXY = [[],[]]\n    startSet = False\n    startUs = 0\n    curDist = [0,0]\n    for line in lines:\n        fields = line.split()\n        lineUs = int(fields[fieldUs])\n        if not startSet or lineUs < startUs:\n            startUs = lineUs\n            lastUs = startUs\n            startSet = True\n        elapsedUs = lineUs - startUs\n\n        lineCmd = fields[fieldCmd]\n        linePin = fields[fieldPin]\n        lineLevel = 
int(fields[fieldLevel])\n if lineCmd == \"W\":\n if linePin in pinAxis0Step or linePin in pinAxis1Step:\n if lineLevel == 0:\n continue\n axisIdx = 0\n if linePin in pinAxis1Step:\n axisIdx = 1\n intervalUs = elapsedUs - lastAxisUs[axisIdx]\n if intervalUs != 0:\n speed = axisDirn[axisIdx] * stepDists[axisIdx] * 1e6 / intervalUs\n # print(intervalUs)\n lastAxisUs[axisIdx] = elapsedUs\n axisSpeed[axisIdx].append(speed)\n curDist[axisIdx] += axisDirn[axisIdx] * stepDists[axisIdx]\n axisDist[axisIdx].append(curDist[axisIdx])\n if axisXYLastUs != 0 and axisXYLastUs + 5 > elapsedUs:\n axisXY[0][len(axisXY[0])-1] = curDist[0]\n axisXY[1][len(axisXY[1]) - 1] = curDist[1]\n else:\n axisXY[0].append(curDist[0])\n axisXY[1].append(curDist[1])\n axisXYLastUs = elapsedUs\n axisTimes[axisIdx].append(elapsedUs)\n if linePin in pinAxis0Dirn or linePin in pinAxis1Dirn:\n axisIdx = 0\n if linePin in pinAxis1Dirn:\n axisIdx = 1\n axisDirn[axisIdx] = -1 if lineLevel else 1\n\n # for ax in axisSpeed:\n # print(ax)\n\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n print(\"axisSpeed[0]\", len(axisSpeed[0]), \"\\n\", axisSpeed[0])\n print(\"axisTimes[0]\", len(axisTimes[0]), \"\\n\", axisTimes[0])\n # print(\"axisDist[0]\", len(axisDist[0]), axisDist[0])\n # print(\"axisSpeed[1]\", axisSpeed[1])\n # print(\"axisTimes[1]\", axisTimes[1])\n # print(\"axisDist[1]\", len(axisDist[1]), axisDist[1])\n ax1.scatter(axisTimes[0], axisDist[0], c=\"b\", label=\"s vs t #1\")\n ax1.scatter(axisTimes[1], axisDist[1], c=\"r\", label=\"s vs t #2\")\n\n ax2.scatter(axisTimes[0], axisSpeed[0], c=\"b\", label=\"v vs t #1\")\n ax2.scatter(axisTimes[1], axisSpeed[1], c=\"r\", label=\"v vs t #2\")\n\n ax3.scatter(axisXY[0], axisXY[1], c=\"g\", label=\"XY\")\n\n plt.show()\n\n","repo_name":"robdobsn/RBotFirmware","sub_path":"Tests/TestAnalyzePlannerOutput/TestAnalyzePlannerOutput.py","file_name":"TestAnalyzePlannerOutput.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"74806390985","text":"#! 
/usr/bin/env python\n\n# Author: James Bennett\n# Jan 2022\n# using template of uol_cmp9767m_tutorial/scripts/set_topo_nav_goal.py\n# by gpdas, email: pdasgautham@gmail.com\n\n\nimport rospy\nimport actionlib\n\nfrom topological_navigation.msg import GotoNodeAction, GotoNodeGoal\n\nfrom std_msgs.msg import Int32\n\nfrom grape_counter.srv import SetMode, SetModeRequest\n\n\n\nmission = (\n {\"goal\": \"Home\", \"action\": SetModeRequest.TRAVEL},\n {\"goal\": \"WP6\", \"action\": SetModeRequest.TRAVEL},\n {\"goal\": \"WP8\", \"action\": SetModeRequest.COUNT},\n {\"goal\": \"WP9\", \"action\": SetModeRequest.TRAVEL},\n {\"goal\": \"WP7\", \"action\": SetModeRequest.COUNT},\n {\"goal\": \"Home\", \"action\": SetModeRequest.TRAVEL},\n)\n\n\nclass Navigator:\n \"\"\"Carries out the task specified by mission.\"\"\"\n def __init__(self):\n \n self.total_count = 0\n\n # set up action client for sending nav goals\n self.nav_client = actionlib.SimpleActionClient('/thorvald_001/topological_navigation', GotoNodeAction)\n self.nav_client.wait_for_server()\n\n # set up service to send mode to manager node\n rospy.wait_for_service('set_mode')\n self.set_mode_srv = rospy.ServiceProxy('set_mode', SetMode)\n\n # subscribe to current grape count\n rospy.Subscriber(\"/grape_count\", Int32, self.update_count)\n\n \n def update_count(self, msg):\n self.total_count = msg.data\n print(\"Current count: {}.\".format(self.total_count))\n \n\n def run(self):\n \n for i, leg in enumerate(mission):\n # create a goal message and set target from mission list\n goal = GotoNodeGoal()\n goal.target = leg[\"goal\"]\n goal.no_orientation = False\n \n # only attempt looking at next leg if we're not on the final leg\n if not (i == len(mission)-1):\n if mission[i+1][\"action\"] == SetModeRequest.TRAVEL:\n # if the action of the next leg is travel \n # then don't worry about orientation at goal\n goal.no_orientation = True\n \n print(\"Next waypoint is {} with mode {}.\".format(goal.target, leg[\"action\"]))\n\n # if want to start a new count (for a different vine) get and sum curret count before reset\n if leg[\"action\"] == \"new_count\":\n # read count from topic\n count = 0\n self.total_count += count\n\n # tell manager node what to do\n response = self.set_mode_srv(leg[\"action\"])\n #print(response.result)\n \n # send goal and wait for it to finish\n self.nav_client.send_goal(goal)\n self.nav_client.wait_for_result()\n \n # get result, log and continue to next leg\n result = self.nav_client.get_result()\n print(\"Reached {} : {}.\".format(goal.target, result))\n\n # print finish\n print(\"Mission complete. The total grape bunches counted was {}.\".format(self.total_count))\n\n\nif __name__==\"__main__\":\n rospy.init_node('grape_navigator', anonymous=True)\n navigator = Navigator()\n print(\"Navigator Initialised. 
Starting Navigation...\")\n navigator.run()\n","repo_name":"bennett-j/CMP9767M","sub_path":"grape_counter/scripts/navigator.py","file_name":"navigator.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42057412514","text":"import inspect\nimport types\nfrom abc import abstractmethod\nfrom typing import Any, Awaitable, Callable, Coroutine, Dict, List, Tuple, Union\n\nfrom vkbottle import ABCRule\nfrom vkbottle.dispatch.dispenser import get_state_repr\nfrom vkbottle.tools.validator import (\n ABCValidator,\n CallableValidator,\n EqualsValidator,\n IsInstanceValidator,\n)\nfrom vkbottle_types import BaseStateGroup\n\nfrom vkbottle_callback.types import MessageEvent\n\nPayloadMap = List[Tuple[str, Union[type, Callable[[Any], bool], ABCValidator, Any]]]\nPayloadMapStrict = List[Tuple[str, ABCValidator]]\nPayloadMapDict = Dict[str, Union[dict, type]]\n\n\nclass ABCMessageEventRule(ABCRule):\n @abstractmethod\n async def check(self, event: MessageEvent) -> bool:\n pass\n\n\nclass PeerRule(ABCMessageEventRule):\n def __init__(self, from_chat: bool = True):\n self.from_chat = from_chat\n\n async def check(self, event: MessageEvent) -> bool:\n return self.from_chat is (event.peer_id != event.user_id)\n\n\nclass FromPeerRule(ABCMessageEventRule):\n def __init__(self, peer_ids: Union[List[int], int]):\n if isinstance(peer_ids, int):\n peer_ids = [peer_ids]\n self.peer_ids = peer_ids\n\n async def check(self, event: MessageEvent) -> bool:\n return event.peer_id in self.peer_ids\n\n\nclass PayloadRule(ABCMessageEventRule):\n def __init__(self, payload: Union[dict, List[dict]]):\n if isinstance(payload, dict):\n payload = [payload]\n self.payload = payload\n\n async def check(self, event: MessageEvent) -> bool:\n return event.get_payload_json() in self.payload\n\n\nclass PayloadContainsRule(ABCMessageEventRule):\n def __init__(self, payload_particular_part: dict):\n self.payload_particular_part = payload_particular_part\n\n async def check(self, event: MessageEvent) -> bool:\n payload = event.get_payload_json(unpack_failure=lambda p: {})\n return all(payload.get(k) == v for k, v in self.payload_particular_part.items())\n\n\nclass PayloadMapRule(ABCMessageEventRule):\n def __init__(self, payload_map: Union[PayloadMap, PayloadMapDict]):\n if isinstance(payload_map, dict):\n payload_map = self.transform_to_map(payload_map)\n self.payload_map = self.transform_to_callbacks(payload_map)\n\n @classmethod\n def transform_to_map(cls, payload_map_dict: PayloadMapDict) -> PayloadMap:\n \"\"\"Transforms PayloadMapDict to PayloadMap\"\"\"\n payload_map = []\n for (k, v) in payload_map_dict.items():\n if isinstance(v, dict):\n v = cls.transform_to_map(v) # type: ignore\n payload_map.append((k, v))\n return payload_map # type: ignore\n\n @classmethod\n def transform_to_callbacks(cls, payload_map: PayloadMap) -> PayloadMapStrict:\n \"\"\"Transforms PayloadMap to PayloadMapStrict\"\"\"\n for i, (key, value) in enumerate(payload_map):\n if isinstance(value, type):\n value = IsInstanceValidator(value)\n elif isinstance(value, list):\n value = cls.transform_to_callbacks(value)\n elif isinstance(value, types.FunctionType):\n value = CallableValidator(value)\n elif not isinstance(value, ABCValidator):\n value = EqualsValidator(value)\n payload_map[i] = (key, value)\n return payload_map # type: ignore\n\n @classmethod\n async def match(cls, payload: dict, payload_map: PayloadMapStrict):\n \"\"\"Matches payload with 
payload_map recursively\"\"\"\n        for (k, validator) in payload_map:\n            if k not in payload:\n                return False\n            elif isinstance(validator, list):\n                if not isinstance(payload[k], dict):\n                    return False\n                elif not await cls.match(payload[k], validator):\n                    return False\n            elif not await validator.check(payload[k]):\n                return False\n        return True\n\n    async def check(self, event: MessageEvent) -> bool:\n        payload = event.get_payload_json(unpack_failure=lambda p: {})\n        return await self.match(payload, self.payload_map)\n\n\nclass FuncRule(ABCMessageEventRule):\n    def __init__(self, func: Callable[[MessageEvent], Union[bool, Awaitable]]):\n        self.func = func\n\n    async def check(self, event: MessageEvent) -> Union[dict, bool]:\n        if inspect.iscoroutinefunction(self.func):\n            return await self.func(event)  # type: ignore\n        return self.func(event)  # type: ignore\n\n\nclass CoroutineRule(ABCMessageEventRule):\n    def __init__(self, coroutine: Coroutine):\n        self.coro = coroutine\n\n    async def check(self, message: MessageEvent) -> Union[dict, bool]:\n        return await self.coro\n\n\nclass StateRule(ABCMessageEventRule):\n    def __init__(self, state: Union[List[\"BaseStateGroup\"], \"BaseStateGroup\"]):\n        if not isinstance(state, list):\n            state = [] if state is None else [state]\n        self.state = [get_state_repr(s) for s in state]\n\n    async def check(self, event: MessageEvent) -> bool:\n        if event.state_peer is None:\n            return not self.state\n        return event.state_peer.state in self.state\n\n\n__all__ = (\n    \"ABCMessageEventRule\",\n    \"PeerRule\",\n    \"FromPeerRule\",\n    \"PayloadRule\",\n    \"PayloadContainsRule\",\n    \"PayloadMapRule\",\n    \"FuncRule\",\n    \"CoroutineRule\",\n    \"StateRule\"\n)\n","repo_name":"mironovmeow/vkbottle-callback","sub_path":"vkbottle_callback/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}\n{"seq_id":"20482267429","text":"\"\"\"This problem is a nice way to understand how classes work.\"\"\"\n\"\"\"Note: as shown at the end of the code, the two vectors are instantiated through the constructor defined in our class.\"\"\"\n\n\"\"\"# 1st solution # non-optimal (the sparse vector is not stored efficiently in memory) # TC O(n) # SC O(1)\"\"\"\n\nfrom typing import List # needed for the List annotations below\n\nclass SparseVector:\n    def __init__(self, nums: List[int]):\n        self.array = nums\n    \n    # Return the dotProduct of two sparse vectors\n    def dotProduct(self, vec: 'SparseVector') -> int:\n        res = 0\n        # iterate over every index, even those whose value is 0\n        for i in range(len(self.array)): # reminder: self refers to the object that calls the method\n            res += self.array[i] * vec.array[i]\n        return res \n        \n# Your SparseVector object will be instantiated and called as such:\n# v1 = SparseVector(nums1)\n# v2 = SparseVector(nums2)\n# ans = v1.dotProduct(v2)\n\n\"\"\"# 2nd solution # optimal # TC O(n) for creating the (idx, val) lists (n is the total number of vector elements); the two-pointer walk takes O(L1 + L2) (L1 and L2 are the sizes of the\ntwo lists of pairs, one per vector).\n# SC O(L) for storing the L non-zero elements.\nWe keep a list of (idx, val) pairs only for the values that differ from 0,\nthen walk both lists and multiply the values whose indices match. 
\"\"\"\n\n\nclass SparseVector:\n    \n    def __init__(self, nums: List[int]):\n        self.pairs = []\n        for index, value in enumerate(nums):\n            if value != 0:\n                self.pairs.append((index, value))\n    \n    # or: def __init__(self, nums: List[int]):\n    #    self.pairs = [(idx, num) for idx, num in enumerate(nums) if num != 0]\n    \n    def dotProduct(self, vec: 'SparseVector') -> int:\n\n        res = p = q = 0\n\n        while p < len(self.pairs) and q < len(vec.pairs):\n            if self.pairs[p][0] == vec.pairs[q][0]: # same index: multiply the two values\n                res += self.pairs[p][1] * vec.pairs[q][1]\n                p += 1\n                q += 1\n            # indices differ: advance the pointer sitting on the smaller index\n            elif self.pairs[p][0] < vec.pairs[q][0]: # the calling vector's index is smaller than the parameter vector's index\n                p += 1\n            else: # i.e. self.pairs[p][0] > vec.pairs[q][0]:\n                q += 1\n\n        return res\n\n    \n# Your SparseVector object will be instantiated and called as such:\n# v1 = SparseVector(nums1)\n# v2 = SparseVector(nums2)\n# ans = v1.dotProduct(v2)\n\n\"\"\" #follow up #binary search # TC O( l1 * log(l2) ) # SC O(n) since we keep an array of pairs, so if there are no zeros its size is n (the size of the nums array).\nThis follow-up is asked very often at Facebook, as is the base question (the expected answer to the base question is solution 2; the solution given below is the one\nexpected at Facebook for the follow-up).\n\nfollow up : \nWhat if only one vector is sparse and the other is full of non-zero values?\nWe still use a list of (idx, val) pairs, but instead of scanning all the indices until the same idx is found in both lists, we use a\nbinary search that looks up each index of the sparse vector inside the non-sparse vector. (The sparse vector stores fewer indices than the other one because it contains more zeros, so\nthe sparse vector is the smaller one.)\nex :\nsparse_vec = [(5, 2), (7, 8)]\nfull_vec = [(1, 2), (2, 4), (3, 5), (4, 6), (7, 19)]\nWe do binary search to find the idx position 5 in full_vec; since there isn't a tuple with idx = 5 we return 'inf', indicating no valid idx was found.\nWe do binary search to find the idx position 7 in full_vec; we successfully find that the tuple with idx = 7 in full_vec has the val = 19. We do res += 8 * 19.\n\nTC analysis: let l1 be the size of the sparse vector (i.e. the size of its array of pairs) and l2 the size of the non-sparse vector (i.e. the size of its array of pairs);\nfor each pair in the sparse vector we look up a pair in the non-sparse vector, so we perform l1 binary searches in the non-sparse vector of size l2.\nEach binary search in the non-sparse vector costs O(log(l2)), so TC = O( l1 * log(l2) ).\n(As you can see, a for loop walks the sparse vector, so l1 iterations, and each iteration performs one binary search in the non-sparse vector, which costs O(log(l2)),\nso in total the loop costs O(l1*log(l2)).
)\n\n\"\"\"\n\nclass SparseVector:\n \n def __init__(self, nums: List[int]):\n self.pairs = [(idx, num) for idx, num in enumerate(nums) if num != 0]\n \n def dotProduct(self, vec: 'SparseVector') -> int:\n\n # trouver le sparse vector ---------------------------------------\n # le sparse vecteur est forcement le plus petit donc :\n sparseVec = None\n if len(self.pairs) < len(vec.pairs):\n sparseVec = self \n notSparseVec = vec \n else :\n sparseVec = vec \n notSparseVec = self\n #-------------------------------------------------------------------\n \n # chercher les valeurs dans la non-spare vector qui on les meme idx que les idx du sparse vector , \n # puis faire le produit val1*val2 (avec (idx,val1) appartenant au sparse vector et (idx,val2) appartenant au non sparse vector. idx est le meme dans les deux paire)\n res= 0\n for pair in sparseVec.pairs : # on parcours tout les paires du sparse vector \n # val est la valeur de l'idx ds le vecteur qui n'est pas sparse si cette idx existe pas dans notSparseVec alors val = inf\n val = self.binarySearch(notSparseVec, pair[0]) #on cherche la paire dans le non-sparse vector qui a le meme idx que la paire actuelle du sparse vector\n res += val * pair[1] if val != float('inf') else 0 #on fait le dot product que si une paire du non-sparse vector a le meme idx que la paire actuelle du sparse vector \n return res\n \n # on utilise binary search car les idx sont forcement ordonner puisque c'est des idx \n def binarySearch(self,vec,target_idx) : # cost O(log(size array where we look for the target element))\n start, end = 0, len(vec.pairs) - 1\n \n while start <= end: \n mid = (start + end) // 2 \n\n if vec.pairs[mid][0] == target_idx: \n return vec.pairs[mid][1] # return val qui a le meme idx de la target \n \n elif vec.pairs[mid][0] > target_idx: #cad la target ce trouve avant la paire actuelle \n end = mid -1\n \n else: #cad la target ce trouve apres la paire actuelle \n start = mid + 1 \n \n return float('inf') # si il ya pas de valeur avec le target_idx alors return inf\n\n \n# Your SparseVector object will be instantiated and called as such:\n# v1 = SparseVector(nums1)\n# v2 = SparseVector(nums2)\n# ans = v1.dotProduct(v2)\n","repo_name":"rtn75000/leetcode-pb","sub_path":"1570. Dot Product of Two Sparse Vectors/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19837668109","text":"# Xây dựng hàm để tính tổng 2 số \r\ndef tong2so(a, b):\r\n tong = a + b\r\n return tong\r\n\r\n# Cho a=5, b=2.5\r\na = 5\r\nb = 2.5\r\n\r\n# Truyền tham số vào hàm như sau:\r\ntong_a_b = tong2so(a, b)\r\n\r\n# In ra kết quả:\r\nprint(\"Tổng a + b là: \", tong_a_b)","repo_name":"VuNghiXuan/giaotrinhPythonExcel","sub_path":"C2/6.def_Tong2so.py","file_name":"6.def_Tong2so.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"vi","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"4948808716","text":"from flask import Flask, request, jsonify\napp = Flask(__name__)\n\n@app.route(\"/\", methods=[\"GET\"])\ndef get_my_ip():\n ip_addr = request.remote_addr\n return '
\n    return 'You are connecting from: ' + ip_addr\n    # return jsonify({'ip': request.remote_addr}), 200\n\nif __name__ == '__main__':\n    app.run(debug=True, host='0.0.0.0')\n","repo_name":"BitNetGeek/net4005assignment5","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"7055065177","text":"from io import BytesIO\nfrom typing import IO\nfrom urllib.parse import urlparse\n\nfrom pyodide._package_loader import get_dynlibs, wheel_dist_info_dir\nfrom pyodide.ffi import IN_BROWSER, to_js\nfrom pyodide.http import pyfetch\n\ntry:\n    import pyodide_js\n    from pyodide_js import loadedPackages, loadPackage\n    from pyodide_js._api import loadBinaryFile, loadDynlib # type: ignore[import]\n\n    REPODATA_PACKAGES = pyodide_js._api.repodata_packages.to_py()\n    REPODATA_INFO = pyodide_js._api.repodata_info.to_py()\nexcept ImportError:\n    if IN_BROWSER:\n        raise\n    # Otherwise, this is pytest test collection so let it go.\n\n\nasync def fetch_bytes(url: str, kwargs: dict[str, str]) -> IO[bytes]:\n    parsed_url = urlparse(url)\n    if parsed_url.scheme == "emfs":\n        return open(parsed_url.path, "rb")\n    if parsed_url.scheme == "file":\n        result_bytes = (await loadBinaryFile(parsed_url.path)).to_bytes()\n    else:\n        result_bytes = await (await pyfetch(url, **kwargs)).bytes()\n    return BytesIO(result_bytes)\n\n\nasync def fetch_string(url: str, kwargs: dict[str, str]) -> str:\n    return await (await pyfetch(url, **kwargs)).string()\n\n\n__all__ = [\n    "fetch_bytes",\n    "fetch_string",\n    "REPODATA_INFO",\n    "REPODATA_PACKAGES",\n    "loadedPackages",\n    "loadDynlib",\n    "loadPackage",\n    "get_dynlibs",\n    "wheel_dist_info_dir",\n    "to_js",\n]\n","repo_name":"gabriel-v/python-wasm-pygame-experiments","sub_path":"src/micropip/micropip/_compat_in_pyodide.py","file_name":"_compat_in_pyodide.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}{"seq_id":"21550762328","text":"import os\nimport sys\n\ndir_path = input('Enter directory path')\n\nif os.path.exists(dir_path):\n    all_files = os.listdir(dir_path)\nelse:\n    print(f"{dir_path} is not valid")\n    sys.exit(1)\n\n\nfor file in all_files:\n    if os.path.isfile(os.path.join(dir_path,file)):\n        print(f"{file} is file")\n    else:\n        print(f"{file} is directory")\n","repo_name":"ShreyasSubhedar/File-Automation","sub_path":"readDir.py","file_name":"readDir.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"17813082568","text":"import json\nimport mtgsdk as mtg\nimport pytest\n\nfrom sdk_client import card_utils\nfrom sdk_client.scripts.cards_csv import get_fieldnames\n\n\njson_data = lambda s: f'data/{s}.json'\n\n@pytest.fixture\ndef card():\n    '''\n    We should not perform GET requests to test our code.
Instead, download some\n cards and save as JSON\n '''\n # card = mtg.Card.where(set='m19') \\\n # .where(page=1) \\\n # .where(pageSize=1) \\\n # .all()\n # return card[0]\n raise DeprecationWarning()\n\n@pytest.fixture(scope='module')\ndef m19_cards():\n with open(json_data('m19'), 'r') as f:\n return json.load(f)\n\n@pytest.fixture(scope='module')\ndef m19_card(m19_cards):\n return m19_cards[0]\n\n@pytest.mark.skip\ndef test_foreign_names(card):\n card_attr = vars(card)\n # print(card_attr)\n\n for elem in card_attr['foreign_names']:\n name = elem['name']\n lang = elem['language']\n if lang == 'Portuguese (Brazil)':\n card_attr['name_ptbr'] = name\n break\n\n print(card_attr['name'])\n print(card_attr['name_ptbr'])\n #assert False\n\n@pytest.mark.skip(reason='Slow as fuck')\ndef test_extract_cards():\n cards = mtg.Card.where(set='m19').all()\n print(len(cards))\n assert False\n\ndef test_m19_card(m19_card):\n c = card_utils.Card(m19_card)\n assert c.foreign_name('Portuguese (Brazil)') == 'Égide dos Céus'\n assert c.color() == 'white'\n\n@pytest.mark.parametrize('n, expected', [\n (226, 'artifact'),\n (261, 'basic land'),\n pytest.param(261, 'land', marks=pytest.mark.xfail),\n (218, 'gold'),\n (248, 'land'),\n pytest.param(248, 'basic land', marks=pytest.mark.xfail),\n])\ndef test_color(n, expected, m19_cards):\n for card in m19_cards:\n try:\n num = int(card['number'])\n if num == n:\n c = card_utils.Card(card)\n assert c.color() == expected\n except ValueError:\n continue\n\n@pytest.mark.xfail(reason='Simple inspection')\ndef test_cardset(m19_cards):\n c = card_utils.CardSet(m19_cards)\n # c = card_utils.CardSet()\n # for card in m19_cards:\n # c.append(card)\n print(sorted(c.fieldnames()))\n assert False\n","repo_name":"victorlacorte/MTG-SDK-Client","sub_path":"tests/test_cards.py","file_name":"test_cards.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25047310406","text":"from functools import update_wrapper\n\nfrom datetime import datetime\n\nfrom fsm_admin.mixins import FSMTransitionMixin\n\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\nfrom general_affair.models import (Supplier, SupplierBusinessType,\n ItemType, ItemCategory, Item, PurchaseOrder,\n OrderReceipt, ItemIssued, IDReleaseType,\n IDCard)\nfrom reporting.response import PDFResponse\n\n\ndef get_model_info(obj):\n app_label = obj.model._meta.app_label\n try:\n return (app_label, obj.model._meta.model_name,)\n except AttributeError:\n return (app_label, obj.model._meta.module_name,)\n\n\ndef wrap(obj, view):\n def wrapper(*args, **kwargs):\n return obj.admin_site.admin_view(view, cacheable=True)(*args, **kwargs)\n return update_wrapper(wrapper, view)\n\n\n@admin.register(Supplier)\nclass SupplierAdmin(admin.ModelAdmin):\n list_display = (\n 'name',\n 'phone_number',\n 'address',\n 'business_type'\n )\n fieldsets = (\n ('General Info', {\n 'fields': (\n 'name',\n 'address',\n 'phone_number',\n 'business_type',\n )\n }),\n ('Business Info', {\n 'fields': (\n 'owner',\n 'tax_id_number',\n 'owner_id_number',\n 'siup_number',\n 'tdp_number',\n )\n }),\n ('Partnership & Contract', {\n 'fields': (\n 'join_date',\n 'start_date',\n 'end_date',\n 'description'\n )\n })\n )\n search_fields = ('name', )\n\n def get_urls(self):\n urls = super(SupplierAdmin, self).get_urls()\n info = get_model_info(self)\n admin_extra_url = [\n url(r'report/$', self.admin_site.admin_view(self.report), name='%s_%s_report' % 
info)\n ]\n\n return admin_extra_url + urls\n\n def report(self, request, *args, **kwargs):\n queryset = self.model._default_manager.get_queryset()\n template = 'general_affair/report/supplier.html'\n context = {\n 'suppliers': queryset,\n 'today': datetime.now()\n }\n return PDFResponse(request, template, context, filename='Supplier.pdf')\n\n\n@admin.register(SupplierBusinessType)\nclass SupplierBusinessTypeAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(ItemType)\nclass ItemTypeAdmin(admin.ModelAdmin):\n list_display = ('code', 'name')\n\n\n@admin.register(ItemCategory)\nclass ItemCategoryAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(Item)\nclass ItemAdmin(admin.ModelAdmin):\n list_display = (\n 'code',\n 'name',\n 'item_type',\n 'item_category'\n )\n list_filter = ('item_type__name', 'item_category__name')\n search_fields = ('code', 'name')\n\n\n@admin.register(PurchaseOrder)\nclass PurchaseOrderAdmin(FSMTransitionMixin, admin.ModelAdmin):\n list_display = (\n 'get_po_number',\n 'item',\n 'supplier',\n 'quantity',\n 'order_date',\n 'state'\n )\n fields = (\n 'number',\n 'order_date',\n 'item',\n 'quantity',\n 'supplier'\n )\n list_filter = ('item__name', )\n search_fields = ('number', 'item__name')\n\n\n@admin.register(OrderReceipt)\nclass OrderReceiptAdmin(admin.ModelAdmin):\n list_display = (\n 'number',\n 'purchase_order',\n 'quantity',\n 'receipt_date'\n )\n search_fields = ('number', 'purchase_order__number')\n\n\n@admin.register(ItemIssued)\nclass ItemIssuedAdmin(admin.ModelAdmin):\n list_display = (\n 'item',\n 'quantity',\n 'date_issued',\n 'recipient',\n 'allocation'\n )\n\n\n@admin.register(IDReleaseType)\nclass IDReleaseTypeAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(IDCard)\nclass IDCardAdmin(admin.ModelAdmin):\n list_display = (\n 'employee',\n 'date_created',\n 'date_expired',\n 'status',\n 'release_type'\n )\n","repo_name":"andrewidya/littleerp","sub_path":"general_affair/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14002660078","text":"# -*- coding: utf-8 -*-\nimport logging\n\nimport scrapy\nfrom scrapy.http import Response\n\nfrom spider_yanzhi.items import SpiderYanzhiItem\n\nlogger = logging.getLogger(__name__)\n\nclass VrpanoNewsSpider(scrapy.Spider):\n name = 'vrpano_news'\n allowed_domains = ['sh.joojzz.com']\n start_urls = ['http://sh.joojzz.com/zixun/']\n\n def parse(self, response):\n sub_selectors = response.xpath('//div[@class=\\'mainLeft\\']//li')\n list_len = len(sub_selectors) - 1\n list_count = 0\n for sub_selector in sub_selectors:\n list_count += 1\n item = SpiderYanzhiItem()\n # title = sub_selector.xpath('a/text()')\n url_selector = sub_selector.xpath('strong/a/@href')\n if url_selector:\n item['original_url'] = url_selector.extract_first().replace(' ','').strip()\n # 下一页\n next_page = response.xpath(\"//a[@class='next']/@href\")\n # next_text = response.xpath(\"//div[@class='Top10 TxtCenter']/div/a[3]/text()\").extract_first()\n next_page_url = None\n if next_page:\n next_page_url = next_page.extract_first().strip()\n next_page_url = 'http://sh.joojzz.com' + next_page_url\n\n if url_selector:\n yield scrapy.Request(url=item['original_url'], callback=self.parse_detail,\n meta={'item': item, 'len': list_len, 'count': list_count,\n 'next_page_url': next_page_url}, dont_filter=True)\n\n def parse_detail(self, response: Response):\n item = response.meta['item']\n len = response.meta['len']\n count 
= response.meta['count']\n        next_page_url = response.meta['next_page_url']\n        # extract the title\n        title = response.xpath("//div[@class='title']/h1/text()").extract_first()\n        sub_title = response.xpath("//div[@class='title']/h6/font")\n\n        html_content = response.xpath("//div[@class='content']").extract_first()\n\n        # store the data in the item\n        item['title'] = title.strip()\n        item['content'] = html_content.strip()\n        item['from_source'] = sub_title[0].xpath('string()').extract_first()\n        item['publish_at'] = sub_title[1].xpath('string()').extract_first()\n        item['original_url'] = response.url\n\n        yield item\n        # the current page has been fully traversed\n        if len == count:\n            # there is a next page\n            logger.info('about to crawl a new page of data: {}'.format(next_page_url))\n            if next_page_url:\n                yield scrapy.Request(url=next_page_url, callback=self.parse, dont_filter=True)\n\n","repo_name":"seniortesting/websites","sub_path":"spider.seniortesting.club/projects/spider_yanzhi/spider_yanzhi/spiders/vrpano_news.py","file_name":"vrpano_news.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"27924101232","text":"# Triangle Types\r\nimport math\r\n\r\n\r\ntriangle_sides = input().split(' ')\r\n\r\nfor x in range(3):\r\n    triangle_sides[x] = float(triangle_sides[x])\r\ntriangle_sides.sort(reverse=True)\r\na, b, c = triangle_sides\r\n\r\na2, b2, c2 = math.pow(a,2), math.pow(b,2), math.pow(c,2)\r\n\r\nif a >= (b+c):\r\n    print("NAO FORMA TRIANGULO")\r\nelse:\r\n    if a2 == (b2+c2):\r\n        print("TRIANGULO RETANGULO")\r\n    if a2 > (b2+c2):\r\n        print("TRIANGULO OBTUSANGULO")\r\n    if a2 < (b2+c2):\r\n        print("TRIANGULO ACUTANGULO")\r\n    if a == b == c:\r\n        print("TRIANGULO EQUILATERO")\r\n    if (a == b != c) or (b == c != a) or (c == a != b):\r\n        print("TRIANGULO ISOSCELES")","repo_name":"rendersonjunior/UriOnlineJudge-Python","sub_path":"1045_Triangle_Types.py","file_name":"1045_Triangle_Types.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"32184560978","text":"\"\"\"\nRecursion uses the system stack;\n\"\"\"\n\ndef calc(s, left, right):\n    \"\"\"\n    ① First locate the position of the operator with the lowest precedence;\n    ② then recurse and evaluate with divide and conquer\n\n    Summary / reflections:\n    ① When scanning for the lowest precedence, cur's initial value must be larger than pre; otherwise, when there is no +-*/ operator, encountering () would still trigger an assignment, making the "operator" a parenthesis and causing errors later\n    ② When assigning op, the position must be offset by the incoming parameter left, which gives the true position of the lowest-precedence operator in the original string\n    ③ When op == -1, the substring must be converted to a number, e.g. '((234', by turning each character into a digit via its ASCII code.
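\n\n    An illustrative trace (added for clarity; not part of the original notes): for\n    s = '2*3+4' with left=0, right=5, the scan assigns cur = 2 at '*' and cur = 1 at\n    '+', so op lands on '+' as the lowest-precedence operator; calc then recurses on\n    '2*3' and '4' and combines the results with +, giving 6 + 4 = 10.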
\n    \"\"\"\n    # index of the operator with the lowest precedence so far; -1 means no operator, i.e. the slice is a number\n    op = -1\n    # previous (best) precedence\n    pre = 10000 - 1\n    # current precedence\n    cur = 10000\n    # amount added to the precedence while inside parentheses\n    temp = 0\n\n    # scan to find the lowest-precedence position, then evaluate recursively with divide and conquer\n\n    for idx, character in enumerate(s[left:right]):\n        if character == '+' or character == '-':\n            cur = 1 + temp\n        elif character == '*' or character == '/':\n            cur = 2 + temp\n        elif character == '(':\n            temp += 100 # after an opening parenthesis, raise the precedence of what follows; cur itself stays unchanged in this step\n        elif character == ')':\n            temp -= 100\n        else:\n            continue\n\n        if cur < pre:\n            pre = cur\n            op = idx + left\n    if op != -1:\n        print('the operator with the lowest precedence is: {}'.format(s[op]))\n    else:\n        num = 0\n        for number in s[left: right]:\n            if ord(number) < ord('0') or ord(number) > ord('9'): continue\n            num = num * 10 + (ord(number) - ord('0'))\n        return num\n    a, b = calc(s, left, op), calc(s, op+1, right)\n    if s[op] == '+':\n        return a + b\n    elif s[op] == '-':\n        return a - b\n    elif s[op] == '*':\n        return a * b\n    elif s[op] == '/':\n        return a / b\n\n\nif __name__ == '__main__':\n    s = '2*(2+5)+10+2*5'\n    res = calc(s, 0, len(s))\n    print('the expression {} evaluates to: {}'.format(s, res))","repo_name":"THZdyjy/algorithm-progress","sub_path":"栈与递归/递归解决表达式求值.py","file_name":"递归解决表达式求值.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"34643333955","text":"import tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import ttk\nfrom tkinter import messagebox as mb\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PackageCounter import countPackets\n\n\nwindow = tk.Tk()\n\n\ndef createDiagramm():\n    matplotlib.use('TkAgg')\n    pie = plt.figure(figsize=(4, 4), facecolor="#F0F0F0")\n    pie.labels = ['ARP', 'DHCP', 'DNS', 'TCP', 'UDP', 'Other']\n    filepath = filedialog.askopenfilename(filetypes=[("PCAP files", "*.pcap")])\n    try:\n        pie.sizes = countPackets(filepath)\n    except Exception:\n        mb.showerror(\n            "Error!",\n            "You need to select *.pcap file."\n        )\n        return\n    pie.patches, pie.text2, pie.text1 = plt.pie(pie.sizes,\n                                                labels=pie.labels,\n                                                autopct='%1.1f%%',\n                                                shadow=True,\n                                                startangle=90,\n                                                pctdistance=1.4,\n                                                textprops={'fontsize': 8, 'color': '#000080'},\n                                                wedgeprops={'lw': 1, 'ls':'--', 'edgecolor':"k"},\n                                                rotatelabels=True\n                                                )\n    plt.axis('equal')\n\n    canvas_statis = FigureCanvasTkAgg(pie, window)\n    canvas_statis.get_tk_widget().place(x=10, y=60)\n\n    colors = [["r", "b", "g"][int(np.random.randint(0, 3, 1))] for _ in pie.sizes]\n    bar = plt.figure(figsize=(4, 4), facecolor="#F0F0F0")\n    bar.patches = plt.bar(pie.labels,\n                          pie.sizes,\n                          alpha=0.6,\n                          bottom=2,\n                          color=colors,\n                          edgecolor="k",\n                          linewidth=2\n                          )\n    figureCanvas = FigureCanvasTkAgg(bar, window)\n    figureCanvas.get_tk_widget().place(x=400, y=60)\n\n\ndef on_closing():\n    if mb.askokcancel("Quit", "Do you want to quit?"):\n        window.destroy()\n\n\ndef execMainWindow():\n    window.title("MOKS Lemaykin KI21-01-11M")\n    window.geometry('800x600')\n\n    create_button = ttk.Button(text="Create diagram", command=createDiagramm)\n    create_button.grid(column=1, row=1, padx=10, pady=10)\n\n    window.protocol("WM_DELETE_WINDOW", on_closing)\n    window.mainloop()\n","repo_name":"savethemurloc/MOKS","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"5417549110","text":"from imageio import imread\nimport numpy as 
np\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom numpy.random import choice\n\n\ndef compute_summary_statistics(images, num_files=500, savepath=None, extension=\"png\"):\n files = list(Path(images).glob(\"*.\" + extension))\n files = choice(files, min(num_files, len(files)))\n\n imgs = [imread(x) for x in tqdm(files)]\n imgs = np.concatenate(imgs, axis=0) # tile vertically\n mean = np.mean(imgs, axis=(0, 1))\n std = np.std(imgs, axis=(0, 1))\n\n means, stds = mean.tolist(), std.tolist()\n if savepath is not None:\n with open(savepath, \"w\") as outfile_h:\n outfile_h.write(f\"mean per channel: {means}\\n\")\n outfile_h.write(f\"std per channel: {stds}\\n\")\n return means, stds\n","repo_name":"russelldj/mmsegmentation_utils","sub_path":"mmseg_utils/dataset_creation/summary_statistics.py","file_name":"summary_statistics.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42812054264","text":"import json, os, time, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom src.query_wrapper import QueryWrapper\nfrom log.querytime import log_querytime\n\n# endpoint = \"http://kg2018a.isi.edu:3030/test/sparql\"\n# endpoint = \"http://localhost:3030/ds/query\"\nendpoint = \"http://gaiadev01.isi.edu:3030/effect/sparql\"\ngraph = QueryWrapper(endpoint)\n\nwith open('../resources/karma_context.json') as f:\n context = json.load(f)\n\nframe_files = set(os.listdir('../resources/prod_query/frame'))\n\nfor filename in os.listdir('../resources/prod_query/query'):\n if not filename.startswith('arimax_malware_aggs'):\n continue\n print('\\n------ try %s -------' % filename)\n if filename.replace('.txt', '.json') in frame_files:\n with open('../resources/prod_query/frame/%s' % filename.replace('.txt', '.json')) as frame_f:\n frame = json.load(frame_f)\n else:\n frame = {}\n\n with open('../resources/prod_query/query/%s' % filename) as query_f:\n query = query_f.read()\n\n res = graph.query(query, frame, context, paging=1000)\n\n lr, lb, tq, tf = res.get('@info', {1: -1, 2: -1, 3: -1, 4: -1}).values()\n print(lr, lb, tq, tf)\n log_querytime(filename[:-4], lr, lb, tq, tf, tq + tf, endpoint, full_query=query)\n\n with open('./outputs/%s' % filename, 'w') as f:\n json.dump(res.get('@graph', {}), f, indent=2)\n","repo_name":"usc-isi-i2/sparql-jsonld","sub_path":"examples/prod_query_example.py","file_name":"prod_query_example.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17813082568","text":"import math\r\n# H=10\r\nH=int(input())\r\n# A,B,C,D=[5,5,5,5]\r\nA,B,C,D=[int(i) for i in input().split()]\r\n# VA,VB,VC,VD=[1,2,1,2]\r\nVA,VB,VC,VD=[int(i) for i in input().split()]\r\n# UDa,UDb,UDc,UDd=['D','U','U','D']\r\nUDa,UDb,UDc,UDd=[i for i in input().split()]\r\nVa=VA*(1 if UDa=='U' else -1)\r\nVb=VB*(1 if UDb=='U' else -1)\r\nVc=VC*(1 if UDc=='U' else -1)\r\nVd=VD*(1 if UDd=='U' else -1)\r\ndef area(x,y,z):\r\n xy=((x[0]-y[0])**2 + (x[1]-y[1])**2 + (x[2]-y[2])**2)**(1/2)\r\n yz=((z[0]-y[0])**2 + (z[1]-y[1])**2 + (z[2]-y[2])**2)**(1/2)\r\n xz=((x[0]-z[0])**2 + (x[1]-z[1])**2 + (x[2]-z[2])**2)**(1/2)\r\n S=(xy+yz+xz)/2\r\n ar=abs(int((S)*(S-xy)*(S-yz)*(S-xz)))**(1/2)\r\n return(ar)\r\n\r\na=[0,0,A]\r\nb=[H,0,B]\r\nc=[H,H,C]\r\nd=[0,H,D]\r\nmaxar=[area(a,b,c)+area(a,d,c)]\r\ncanAmove=True\r\ncanBmove=True\r\ncanCmove=True\r\ncanDmove=True\r\nCount=3\r\nwhile canAmove or canBmove or 
canCmove or canDmove:\r\n if canAmove:\r\n A+=Va\r\n if A>=H or A<=0:\r\n if A>=H:\r\n a=[0,0,H]\r\n if A<=0:\r\n a=[0,0,0]\r\n Count-=1\r\n canAmove=False\r\n else:\r\n a=[0,0,A]\r\n\r\n if canBmove:\r\n B+=Vb\r\n if B>=H or B<=0:\r\n if B>=H:\r\n b=[H,0,H]\r\n if B<=0:\r\n b=[H,0,0]\r\n Count-=1\r\n canBmove=False\r\n else:\r\n b=[H,0,B]\r\n if canCmove:\r\n C+=Vc\r\n if C>=H or C<0:\r\n if C>=H:\r\n c=[H,H,H]\r\n if C<=0:\r\n C=[H,H,0]\r\n Count-=1\r\n canCmove=False\r\n else:\r\n c=[H,H,C]\r\n \r\n if canDmove:\r\n D+=Vd\r\n if D>=H or D<=0:\r\n if D>=H:\r\n d=[0,H,H]\r\n if D<=0:\r\n d=[0,H,0]\r\n Count-=1\r\n canDmove=False\r\n else:\r\n d=[0,H,D]\r\n arabc=(area(a,b,c))\r\n aradc=(area(a,d,c))\r\n maxar.append((arabc+aradc))\r\n \r\nprint(\"{0} {1}\".format(int(4*(max(maxar)**2)),int(4*(min(maxar)**2))))\r\n","repo_name":"sameersetia/sam","sub_path":"4Particles.py","file_name":"4Particles.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2759250767","text":"INDEX_CREATE = {\n \"settings\": {\n \"refresh_interval\": \"1s\",\n \"analysis\": {\n \"filter\": {\n \"english_stop\": {\"type\": \"stop\", \"stopwords\": \"_english_\"},\n \"english_stemmer\": {\"type\": \"stemmer\", \"language\": \"english\"},\n \"english_possessive_stemmer\": {\"type\": \"stemmer\", \"language\": \"possessive_english\"},\n \"russian_stop\": {\"type\": \"stop\", \"stopwords\": \"_russian_\"},\n \"russian_stemmer\": {\"type\": \"stemmer\", \"language\": \"russian\"},\n },\n \"analyzer\": {\n \"ru_en\": {\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"lowercase\",\n \"english_stop\",\n \"english_stemmer\",\n \"english_possessive_stemmer\",\n \"russian_stop\",\n \"russian_stemmer\",\n ],\n }\n },\n },\n },\n \"mappings\": {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"imdb_rating\": {\"type\": \"float\"},\n \"genre\": {\n \"type\": \"nested\",\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"name\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n },\n },\n \"title\": {\"type\": \"text\", \"analyzer\": \"ru_en\", \"fields\": {\"raw\": {\"type\": \"keyword\"}}},\n \"description\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n \"director\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n \"actors_names\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n \"writers_names\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n \"actors\": {\n \"type\": \"nested\",\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"name\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n },\n },\n \"writers\": {\n \"type\": \"nested\",\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"name\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n },\n },\n },\n },\n}\n\nSQL_QUERY = \"\"\"\n SELECT\n fw.id,\n fw.title,\n fw.description,\n fw.rating,\n fw.type,\n fw.created_at,\n fw.updated_at,\n COALESCE (\n json_agg(\n DISTINCT jsonb_build_object(\n 'person_role', pfw.role,\n 'person_id', p.id,\n 'person_name', p.full_name,\n 'updated_at', p.updated_at\n )\n ) FILTER (WHERE p.id is not null),\n '[]'\n ) as persons,\n COALESCE (\n json_agg(\n DISTINCT jsonb_build_object(\n 'genre_id', g.id,\n 'genre_name', g.name\n )\n ) FILTER (WHERE g.id is not null),\n '[]'\n ) as genres,\n array_agg(g.updated_at) as g_updated_at,\n array_agg(p.updated_at) as p_updated_at\n FROM content.film_work fw\n 
LEFT JOIN content.person_film_work pfw ON pfw.film_work_id = fw.id\n LEFT JOIN content.person p ON p.id = pfw.person_id\n LEFT JOIN content.genre_film_work gfw ON gfw.film_work_id = fw.id\n LEFT JOIN content.genre g ON g.id = gfw.genre_id\n %s\n GROUP BY fw.id\n ORDER BY fw.updated_at\n LIMIT %s;\n \"\"\"\n","repo_name":"AlexanderPRM/Cinema","sub_path":"films_api/etl/etl_movies/app/quiries.py","file_name":"quiries.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"12857293050","text":"primeira_nota = float(input('Digite a primeira nota: '))\nsegunda_nota = float(input('Digite a segunda nota: '))\nmedia_nota = (primeira_nota + segunda_nota) / 2\nconceito = None\nif 9.0 < media_nota <= 10.0:\n conceito = 'A Aprovado'\nelif 7.5 < media_nota <= 9.0:\n conceito = 'B Aprovado'\nelif 6.0 < media_nota <= 7.5:\n conceito = 'C Aprovado'\nelif 4.0 < media_nota <= 6.0:\n conceito = 'D Reprovado'\nelse:\n conceito = 'E Reprovado'\nprint(\nf\"\"\"Nota 1: {primeira_nota:.2f}\nNota 2: {segunda_nota:.2f}\nMédia: {media_nota:.2f}\nConceito: {conceito}\"\"\"\n)\n","repo_name":"tiagotardelli/lista-python-brasil","sub_path":"02_estrutura_de_decisao/14_exercicio.py","file_name":"14_exercicio.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"23490846730","text":"#!/usr/bin/env python\nimport paho.mqtt.client as mqtt\nimport json\nimport os\nfrom datetime import datetime\nimport random\nimport signal\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--topic\", help=\"the mqtt topic to subscribe\", type=str)\nparser.add_argument(\"--qos\", help=\"the QOS of MQTT message\", type=int)\n\nargs = parser.parse_args()\n\nprint('Proccess id:', os.getpid())\n\nWRITE_FLAG = False\n#MQTT_TOPIC = [(\"$thing/OR000000insynerger05/$data/conf/#\",1)]\nMQTT_TOPIC = [(args.topic,args.qos)]\nprint(MQTT_TOPIC)\n\ndef signal_handler(sig, frame):\n client.disconnect()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\nsignal.signal(signal.SIGTERM, signal_handler)\n\ndef on_connect(client, userdata, flags, rc): # The callback for when the client connects to the broker\n print(\"Connected with result code {0}\".format(str(rc))) # Print result of connection attempt\n client.subscribe(MQTT_TOPIC)\n \n\ndef on_message(client, userdata, msg): # The callback for when a PUBLISH message is received from the server.\n print('time: ',datetime.now().strftime(\"%H:%M:%S\"))\n print(msg.topic + \"||\" + str(msg.payload)+'\\n')\n\ndef on_disconnect(client, userdata, rc):\n if rc == 0:\n print(\"Gracefully disconnected.\")\n else:\n print(\"Unexpected disconnection.\")\n\nmqttc_id = 'debug:debug-client:'+ str(random.randint(0,9999))\nprint('MQTT-client-id:', mqttc_id)\n\nclient = mqtt.Client(client_id=mqttc_id)\nclient.username_pw_set(\"debug\",\"debug\")\n\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_disconnect = on_disconnect\n\nclient.connect(\"broker.insynerger.streetlightbroker.com\", 4883, 60)\nclient.loop_forever()","repo_name":"108356037/streetlamp_test_helper","sub_path":"subscribe/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8416116498","text":"\"\"\"\nCapture events from rabbitmq and log them into 
arangodb\n\n\"\"\"\nimport json\nfrom asyncio import get_event_loop\nfrom logging import getLogger, StreamHandler\nfrom os import environ\nfrom pathlib import Path\n\nimport yaml\nfrom aio_pika import connect_robust, ExchangeType\n\n# pylint: disable=invalid-name\nfrom arango import DocumentInsertError, DocumentUpdateError\nfrom tentacruel.time import to_zulu_string, utcnow\n\nlogger = getLogger(__name__)\n\ndef connect_to_adb(config):\n \"\"\"\n Connect to arango database\n\n :param config:\n :return:\n \"\"\"\n from arango import ArangoClient\n client = ArangoClient(**config[\"arangodb\"][\"events\"][\"client\"])\n return client.db(**config[\"arangodb\"][\"events\"][\"database\"])\n\nclass LogToADB:\n \"\"\"\n Application that loads events from RabbitMQ into arangodb database\n\n \"\"\"\n def __init__(self):\n if \"LOGGING_LEVEL\" in environ:\n getLogger(None).setLevel(environ[\"LOGGING_LEVEL\"])\n\n getLogger(None).addHandler(StreamHandler())\n with open(Path.home() / \".tentacruel\" / \"config.yaml\") as a_stream:\n self.config = yaml.load(a_stream)\n adb = connect_to_adb(self.config)\n self.collection = adb.collection(\"sqs_events\")\n self.attributes = adb.collection(\"attributes\")\n self.connection = None\n\n async def setup(self):\n \"\"\"\n Initialization that can only be completed in an async method\n \"\"\"\n self.connection = await connect_robust(\n loop=get_event_loop(),\n **self.config[\"pika\"]\n )\n\n async def log_events(self):\n \"\"\"\n Get events from Q and log them in the database\n\n :return:\n \"\"\"\n await self.setup()\n async with self.connection:\n channel = await self.connection.channel()\n\n exchange = await channel.declare_exchange(\n 'smartthings',\n ExchangeType.FANOUT\n )\n\n queue = await channel.declare_queue(exclusive=True)\n await queue.bind(exchange, routing_key=\"\")\n async with queue.iterator() as messages:\n async for message in messages:\n with message.process():\n event = json.loads(message.body)\n event[\"loggedTime\"] = to_zulu_string(utcnow())\n logger.debug(\"Received event: %s\", event)\n try:\n self.collection.insert(event, silent=True)\n except DocumentInsertError as error:\n if error.error_code != 1210:\n raise\n if \"attribute\" in event and \"value\" in event:\n packet = {\n \"_key\": event[\"deviceId\"],\n event[\"attribute\"]: {\n \"value\": event[\"value\"],\n \"eventTime\": event[\"eventTime\"]\n }\n }\n\n try:\n self.attributes.update(packet, silent=True)\n except DocumentUpdateError as error:\n if error.error_code != 1202:\n raise\n self.attributes.insert(packet, silent=True)\n","repo_name":"paulhoule/tentacruel","sub_path":"tentacruel/log_to_adb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21519122546","text":"import cv2\nimport numpy as np\nimport Camera\n\ndef nothing(x):\n pass\n\n# Create a black image, a window\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\n\n\n# create trackbars for color change\ncv2.createTrackbar('Hue Low','image',0,255,nothing)\ncv2.createTrackbar('Sat Low','image',0,255,nothing)\ncv2.createTrackbar('Value Low','image',0,255,nothing)\ncv2.createTrackbar('Hue High','image',0,255,nothing)\ncv2.createTrackbar('Sat High','image',0,255,nothing)\ncv2.createTrackbar('Value High','image',0,255,nothing)\n\nimg = None\nCamera.init()\nwhile img is None:\n img = Camera.grabPicture()\n if img is not None:\n img = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\nwhile(1):\n k = 
cv2.waitKey(1) & 0xFF\n    if k == 27:\n        break\n\n    # get current positions of six trackbars\n    hL = cv2.getTrackbarPos('Hue Low','image')\n    sL = cv2.getTrackbarPos('Sat Low','image')\n    vL = cv2.getTrackbarPos('Value Low','image')\n    hH = cv2.getTrackbarPos('Hue High','image')\n    sH = cv2.getTrackbarPos('Sat High','image')\n    vH = cv2.getTrackbarPos('Value High','image')\n    lower = np.array([hL,sL,vL], dtype=np.uint8)\n    upper = np.array([hH,sH,vH], dtype=np.uint8)\n\n    mask = cv2.inRange(img, lower, upper)\n    mask = cv2.bitwise_not(mask)\n    cv2.imshow('image',mask)\n\ncv2.destroyAllWindows()\n","repo_name":"GastricFluid/LHS_Robotics_2016-2017","sub_path":"Imaging/hsvPicker.py","file_name":"hsvPicker.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"3762795701","text":"from cmsplugin_tabs.models import TabHeaderPlugin\nfrom django.template.loader import find_template\n\ndef tabs_plugin_processor(instance, placeholder, rendered_content, original_context):\n    request = original_context['request']\n    if isinstance(instance, TabHeaderPlugin):\n        wrap_info = {\n            'wrapper_plugin': instance,\n            'context': original_context,\n            'plugins': [],\n            'plugin_counter': instance.tab_count,\n        }\n        request.wrap_info = wrap_info\n    else:\n        wrap_info = getattr(request, 'wrap_info', None)\n        todo = wrap_info and wrap_info['plugin_counter']\n        if todo and not(instance._render_meta.text_enabled and instance.parent):\n            wrap_info['plugin_counter'] -= 1\n            wrap_info['plugins'].append(rendered_content)\n            if wrap_info['plugin_counter'] == 0 or original_context['plugin']['last']:\n                wrapper_plugin = wrap_info['wrapper_plugin']\n                template = find_template(wrapper_plugin.template)[0]\n                context = wrap_info['context']\n                context['plugins'] = wrap_info['plugins']\n                request.wrap_info = None\n                return template.render(context)\n        else:\n            # we're not in a wrapper, just return what we got untouched\n            return rendered_content\n    return u""\n","repo_name":"imagescape/cmsplugin-tabs","sub_path":"cmsplugin_tabs/plugin_processors.py","file_name":"plugin_processors.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"85369419","text":"def solution(N):\r\n    i = 1\r\n    answer = 0\r\n\r\n    while i ** 2 <= N: # if a number divides N, the divisor and the quotient form a pair of factors\r\n        if i ** 2 == N: # for a perfect square, i pairs with itself, so it contributes only one factor\r\n            answer += 1\r\n\r\n        elif N % i == 0: # divisibility by i means both i and the quotient N // i are factors\r\n            answer += 2\r\n\r\n        i += 1\r\n    print(answer)\r\n    return answer\r\n\r\nN=2\r\n\r\nsolution(N)","repo_name":"Areum0921/Abox","sub_path":"codility lesson/Lesson10_CountFactors.py","file_name":"Lesson10_CountFactors.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"36509801216","text":"import pandas as pd\nimport sqlite3\nimport time\n\n# First method using pandas\ndef get_avg_pandas(model):\n    df = pd.read_csv("harddrive.csv")\n    df = df[df["model"] == model]\n    daily_count = df.groupby(["date","model"])["date"].count()\n    average = daily_count.mean()\n    print(f"Average of daily readings performed: {average}")\n\n# Best approach using sqlite and indexes.\ndef index_data():\n    db = sqlite3.connect("harddrive.sqlite")\n    for df in pd.read_csv("harddrive.csv",\n                          usecols=["date", "model"],\n                          chunksize=1000000):\n        df.to_sql("harddrive", db, if_exists="append")\n    \n    # Create index for the model 
column.\n # Take the execution time.\n start_time = time.time()\n db.execute(\"CREATE INDEX model ON harddrive(model)\")\n db.close()\n\n# Define a simple function to get the daily avg given certain hardrive model\ndef get_daily_avg(model):\n connection = sqlite3.connect(\"harddrive.sqlite\")\n # another_query = \"SELECT COUNT(`date`) FROM harddrive WHERE model = ? GROUP BY `date`\"\n sub_query = \"SELECT COUNT(*) as daily_count FROM harddrive WHERE model = ? GROUP BY `date`\"\n query = f\"SELECT AVG(daily_count) FROM ({sub_query}) AS daily_count_table\"\n values = (model,)\n data = pd.read_sql_query(query, connection, params=values)\n connection.close()\n print(f\"Average of daily readings performed: {data}\")\n return data\n\nprint(\"Calculating...\")\n\n# First approach\nstart_time = time.time()\nget_avg_pandas(\"Hitachi HDS5C3030ALA630\")\nprint(f\"Execution time using only pandas: {time.time() - start_time} seconds \\n\")\n\n# Second approach\nindex_data()\nstart_time = time.time()\nget_daily_avg(\"Hitachi HDS5C3030ALA630\")\nprint(f\"Execution time using pandas + sqlite: {time.time() - start_time} seconds\")\n","repo_name":"beardboi/Hard-Drive-Test-Data","sub_path":"harddrive_3.py","file_name":"harddrive_3.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22148437381","text":"class Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n # make the arrays of the length provided\n # merge both arrays and sort them\n while len(nums1) != m:\n nums1.pop()\n nums1 += nums2\n nums1.sort()\n \n","repo_name":"Elly0816/LeetCode-Exercises","sub_path":"88-merge-sorted-array/88-merge-sorted-array.py","file_name":"88-merge-sorted-array.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19437608614","text":"import frappe\nfrom frappe import _\n\ndef execute(filters ):\n\tcolumns, data = [], []\n\tdata = get_all_leave_application(filters)\n\tcolumns = get_columns()\n\treturn columns, data\n\n\ndef get_all_leave_application(filters):\n\treturn frappe.db.get_all(\"Leave Application\", \n\t['employee_name','leave_type', 'from_date', 'to_date'],filters)\n\n\ndef get_columns():\n\tcolumns = [{\n \"fieldname\": \"employee_name\",\n \"label\": _(\"Employee Name\"),\n \"fieldtype\": \"Data\",\n},\n{\n \"fieldname\": \"leave_type\",\n \"label\": _(\"Leave Type\"),\n \"fieldtype\": \"Link\",\n \"options\": \"Leave Type\",\n},\n{\n \"fieldname\": \"from_date\",\n \"label\": _(\"From Date\"),\n \"fieldtype\": \"Date\",\n \"options\": \"Leave Type\",\n},\n{\n \"fieldname\": \"to_date\",\n \"label\": _(\"To Date\"),\n \"fieldtype\": \"Date\",\n}\n] \n\treturn columns","repo_name":"husamhammad/human_resource","sub_path":"human_resource/human_resource/report/leave_application_script_report/leave_application_script_report.py","file_name":"leave_application_script_report.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23305955044","text":"# Python Program To Solve Tower Of Hanoi Problem\r\n\r\n'''\r\nFunction Name : Tower Of Hanoi Logic. 
\r\nFunction Date : 8 Sep 2020\r\nFunction Author : Prasad Dangare\r\nInput : Integer\r\nOutput : Integer \r\n'''\r\n\r\ndef towers(n, a, c, b):\r\n if n == 1:\r\n# If Only 1 Disk , Then Move It From A To C\r\n \r\n print('Move Disk %i From Pole %s To Pole %s' %(n, a, c))\r\n else: # If More Than 1 Disk\r\n \r\n# Move First n-1 Disk From A To B Using C As Intermidate Pole\r\n\r\n towers(n-1, a, b, c) \r\n \r\n# Move Remaining 1 Disk From A To C\r\n\r\n print('Move Disk %i From Pole %s To Pole %s' %(n, a, c))\r\n \r\n# Move n-1 Disk From B To C Using A As Intermidate Pole\r\n\r\n towers(n-1, b, c, a)\r\n \r\n# Call The Function\r\n\r\nn = int(input('Enter NNumber Of Disks : '))\r\n\r\n# We Should Change N Disk From A To C Using B As Intermidate Pole\r\n\r\ntowers(n, 'A', 'C', 'B')","repo_name":"PRASAD-DANGARE/PYTHON","sub_path":"hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17460359178","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\"\"\"Tests for Webcompat Janitor Business Rules.\"\"\"\n\nimport json\nimport unittest\n\nfrom janitor import validation\n\nFIXTURE_DIR = './tests/fixtures/'\n\n\nclass TestJanitorLabels(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def read_issue(self, issue_fixture):\n \"\"\"Reads the fixture for a test.\"\"\"\n with open(FIXTURE_DIR + issue_fixture) as f:\n json_issue = json.load(f)\n return json_issue\n\n def test_get_status_labels(self):\n \"\"\"Returns the list of labels.\"\"\"\n json_issue = self.read_issue('issue_with_status_label.json')\n expected = ['status-needstriage']\n actual = validation.get_status_labels(json_issue)\n self.assertListEqual(expected, actual)\n json_issue = self.read_issue('issue_no_status_label.json')\n actual = validation.get_status_labels(json_issue)\n self.assertIsNone(actual)\n\n def test_has_status_label(self):\n \"\"\"Send True when issue has status label, else False.\"\"\"\n json_issue = self.read_issue('issue_no_status_label.json')\n actual = validation.has_status_label(json_issue)\n self.assertFalse(actual)\n json_issue = self.read_issue('issue_with_status_label.json')\n actual = validation.has_status_label(json_issue)\n self.assertTrue(actual)\n\n def test_has_conflicting_status(self):\n \"\"\"Send True for mutually exclusive labels.\"\"\"\n json_issue = self.read_issue('issue_multiple_status_labels.json')\n actual = validation.has_conflicting_status(json_issue)\n self.assertTrue(actual)\n json_issue = self.read_issue('issue_with_status_label.json')\n actual = validation.has_wrong_status(json_issue)\n self.assertFalse(actual)\n\n def test_has_wrong_status(self):\n \"\"\"Send True for issues with wrong labels with regards to state.\"\"\"\n json_issue = self.read_issue('issue_wrong_open_status_label.json')\n actual = validation.has_wrong_status(json_issue)\n self.assertTrue(actual)\n json_issue = self.read_issue('issue_wrong_closed_status_label.json')\n actual = validation.has_wrong_status(json_issue)\n self.assertTrue(actual)\n json_issue = self.read_issue('issue_with_status_label.json')\n actual = validation.has_wrong_status(json_issue)\n self.assertFalse(actual)\n\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"karlcow/webcompat-janitor","sub_path":"tests/test_labels.py","file_name":"test_labels.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"13167425750","text":"\"\"\" Code for running pyflakes checks in Vim buffer\n\nThe main function is ``check``, which runs the pyflakes check on a buffer.\n\"\"\"\n\nimport sys\nimport ast\nfrom operator import attrgetter\nimport re\n\nfrom pyflakes import checker, messages\n\ntry:\n    # Vim module available within vim\n    import vim\nexcept ImportError:\n    # Otherwise, mock it up for tests\n    from mock import Mock\n    vim = Mock()\n\n\nclass loc(object):\n\n    def __init__(self, lineno, col=None):\n        self.lineno = lineno\n        self.col_offset = col\n\n\nclass SyntaxError(messages.Message):\n\n    message = 'could not compile: %s'\n\n    def __init__(self, filename, lineno, col, message):\n        messages.Message.__init__(self, filename, loc(lineno, col))\n        self.message_args = (message,)\n        self.lineno = lineno\n\n\nclass blackhole(object):\n    write = flush = lambda *a, **k: None\n\n\ndef check(buffer):\n    filename = buffer.name\n    contents = buffer[:]\n\n    # shebang usually found at the top of the file, followed by source code encoding marker.\n    # assume everything else that follows is encoded in the encoding.\n    for n, line in enumerate(contents):\n        if n >= 2:\n            break\n        elif re.match(r'#.*coding[:=]\\s*([-\\w.]+)', line):\n            contents = ['']*(n+1) + contents[n+1:]\n            break\n\n    contents = '\\n'.join(contents) + '\\n'\n\n    vimenc = vim.eval('&encoding')\n    if vimenc and hasattr(contents, 'decode'):\n        contents = contents.decode(vimenc)\n\n    builtins = set(['__file__'])\n    try:\n        builtins.update(set(eval(vim.eval('string(g:pyflakes_builtins)'))))\n    except Exception:\n        pass\n\n    try:\n        # TODO: use warnings filters instead of ignoring stderr\n        old_stderr, sys.stderr = sys.stderr, blackhole()\n        try:\n            tree = ast.parse(contents, filename or '')\n        finally:\n            sys.stderr = old_stderr\n    except:\n        exc_value = sys.exc_info()[1]\n        try:\n            lineno = exc_value.lineno\n            offset = exc_value.offset\n            line = exc_value.text\n        except IndexError:\n            lineno, offset, line = 1, 0, ''\n        if line and line.endswith("\\n"):\n            line = line[:-1]\n\n        return [SyntaxError(filename, lineno, offset, str(exc_value))]\n    else:\n        # pyflakes looks to _MAGIC_GLOBALS in checker.py to see which\n        # UndefinedNames to ignore\n        old_globals = getattr(checker, '_MAGIC_GLOBALS', [])\n        checker._MAGIC_GLOBALS = set(old_globals) | builtins\n\n        filename = '(none)' if filename is None else filename\n        w = checker.Checker(tree, filename)\n\n        checker._MAGIC_GLOBALS = old_globals\n\n        w.messages.sort(key = attrgetter('lineno'))\n        return w.messages\n\n\ndef vim_quote(s):\n    return s.replace("'", "''")\n","repo_name":"kevinw/pyflakes-vim","sub_path":"ftplugin/python/flaker.py","file_name":"flaker.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":396,"dataset":"github-code","pt":"81"}{"seq_id":"5027499286","text":"\"\"\"\n__author__ = 'Christopher Fagiani'\n\"\"\"\n\nimport ConfigParser\nimport time\nimport logging\nfrom random import randrange\nfrom random import randint\nfrom hit_detector import SensorInitializationError\n\nlog = logging.getLogger(__name__)\n\n\nclass WorkoutController(object):\n    \"\"\"\n    This class handles the main logic of a \"workout\" which consists of a number of punching bag hits.
It will use\n the led_controls to signal the user to hit the bag and the hit_detector to wait for the hit.\n \"\"\"\n\n def __init__(self, conf_file, controller=None, detector=None):\n try:\n # read the configuration file\n config = ConfigParser.RawConfigParser()\n config.read(conf_file)\n self.cur_workout = None\n self.is_running = False\n self.detect_dir = config.getboolean(\"workout\", \"detect_direction\")\n self.calibration_timeout = config.getint(\"sensor\", \"calibration_timeout\")\n self.random_delay = config.getboolean(\"workout\", \"random_delay\")\n if controller:\n self.led_controller = controller\n else:\n from engine.io import led_controls\n # initialize the led_controller by building a dictionary of light_id to pins\n self.led_controller = led_controls.LedController({\"r\": config.getint(\"lights\", \"right\"),\n \"l\": config.getint(\"lights\", \"left\"),\n \"c\": config.getint(\"lights\", \"center\")})\n\n self.hit_timeout = config.getfloat(\"workout\", \"reaction_timeout\")\n self.recoil_wait = config.getfloat(\"workout\", \"recoil_wait\")\n self.calibration_hits = config.getint(\"workout\", \"calibration_hits\")\n if detector:\n self.hit_detector = detector\n else:\n import hit_detector\n # initialize the hit_detector\n self.hit_detector = hit_detector.HitDetector(config.getfloat(\"sensor\", \"threshold\"),\n config.getfloat(\"sensor\", \"calibration_timeout\"),\n config.getint(\"sensor\", \"samples\"),\n detect_dir=self.detect_dir)\n except BaseException as e:\n # if we had an error during initialization call clean-up so we can release any resources\n try:\n self.cleanup()\n except BaseException as e2:\n log.error(\"Could not clean up {msg}\".format(msg=e2.message))\n raise e\n raise e\n\n def has_valid_calibration(self):\n return self.hit_detector.has_valid_calibration()\n\n def calibrate_orientation(self):\n \"\"\"\n Since we do not know how the hardware was mounted on the bag, we need to ask the user to hit the bag on each side\n so we can calibrate the hit-detector for that direction. After getting a reading for all 3 directions, it will\n validate the calibration. If invalid, it will repeat the calibration process up to calibration_hits times. 
If a\n valid calibration is not read after calibration_hits iterations, a SensorInitializationError is raised.\n :param timeout:\n :return:\n \"\"\"\n for i in range(self.calibration_hits):\n self.led_controller.flash()\n self.led_controller.activate_lights('r')\n r_val = self.hit_detector.calibrate_hit('r', self.calibration_timeout)\n self.hit_detector.wait_for_stability(self.recoil_wait)\n time.sleep(0.5)\n self.led_controller.activate_lights('l')\n l_val = self.hit_detector.calibrate_hit('l', self.calibration_timeout)\n self.hit_detector.wait_for_stability(self.recoil_wait)\n time.sleep(0.5)\n self.led_controller.activate_lights('c')\n c_val = self.hit_detector.calibrate_hit('c', self.calibration_timeout)\n self.led_controller.activate_lights('')\n if self.hit_detector.has_valid_calibration():\n log.debug(\"Got valid calibration r: {r}, l: {lv}, c: {c}\".format(r=r_val, lv=l_val, c=c_val))\n return\n else:\n log.info(\"Invalid calibration r: {r}, l: {lv}, c: {c}\".format(r=r_val, lv=l_val, c=c_val))\n raise SensorInitializationError(\"Could not obtain a valid calibration\")\n\n def start_workout(self, mode, workout_time, frequencies={'l': 33, 'c': 33, 'r': 34}):\n \"\"\"\n Executes the workout loop until it is over (either time elapses or the programmed workout is finished).\n\n :param mode:\n :param workout_time:\n :param frequencies:\n :return:\n \"\"\"\n self.is_running = True\n deadline = time.time() + workout_time * 60\n self.cur_workout = WorkoutState(deadline)\n round_num = 0\n hit_count = 0\n miss_count = 0\n sides = ['r', 'c', 'l']\n validate_frequencies(frequencies)\n while time.time() < deadline and self.is_running:\n self.led_controller.activate_lights('')\n self.hit_detector.wait_for_stability(self.recoil_wait)\n if self.random_delay:\n time.sleep(randint(0, 4))\n if mode == 'random':\n side = get_next_side(frequencies)\n else:\n side = sides[round_num % 3]\n if self.await_hit(side):\n hit_count += 1\n else:\n miss_count += 1\n round_num += 1\n self.led_controller.activate_lights('')\n self.is_running = False\n return self.cur_workout\n\n def await_hit(self, side):\n \"\"\"\n Turns on a light and waits for the hit_detector to register a hit.\n :param side:\n :return:\n \"\"\"\n self.led_controller.activate_lights(side)\n start_time = time.time()\n hit_val, is_correct = self.hit_detector.wait_for_hit(side, self.hit_timeout)\n reaction_time = time.time() - start_time\n if hit_val:\n self.cur_workout.record_hit(side, reaction_time, is_correct)\n if is_correct:\n return hit_val\n else:\n return None\n\n def cleanup(self):\n self.led_controller.cleanup()\n\n def get_state(self):\n self.cur_workout.server_time = time.time()\n return self.cur_workout\n\n def stop_workout(self):\n self.is_running = False\n\n\ndef validate_frequencies(frequencies):\n \"\"\"\n Validates that the frequencies passed in add up to 100 and do not contain negatives.\n :param frequencies:\n :return:\n \"\"\"\n if sum(frequencies.values()) != 100:\n raise ConfigurationError(\"Frequency weights must add up to 100\")\n if len([z for z in frequencies.values() if 0 <= z <= 100]) != len(frequencies):\n raise ConfigurationError(\"Frequency weights must be between 0 and 100, inclusive.\")\n\n\ndef get_next_side(frequencies):\n \"\"\" Returns a key from frequencies using the values to weigh the likelihood of selection.\n This assumes the frequency map has values in the range of 0,100 (inclusive) and that they add up to 100.\"\"\"\n val = randrange(1, 101)\n last_weight = 0\n for side, weight in 
frequencies.iteritems():\n if 0 < val <= last_weight + weight:\n return side\n else:\n last_weight += weight\n\n\nclass WorkoutState(object):\n\n def __init__(self, deadline):\n self.correct_hits = []\n self.incorrect_hits = []\n self.timeouts = 0\n self.deadline = deadline\n self.server_time = time.time()\n\n def record_hit(self, direction, reaction_time, is_correct):\n dest = self.correct_hits if is_correct else self.incorrect_hits\n dest.append(HitStats(direction, reaction_time))\n\n def record_timeout(self):\n self.timeouts += 1\n\n\nclass HitStats(object):\n\n def __init__(self, direction, reaction_time):\n self.direction = direction\n self.time = reaction_time\n\n\nclass ConfigurationError(Exception):\n pass\n","repo_name":"cfagiani/sparpi","sub_path":"engine/workout_controller.py","file_name":"workout_controller.py","file_ext":"py","file_size_in_byte":8171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15649798014","text":"import unittest\nimport datetime as dt\nfrom diary import Event\n\nDEFAULT_FORMATTER = \"({level})({info})\"\n\nclass FormattedEvent(Event):\n formatter = DEFAULT_FORMATTER\n\n\nclass TestEvent(unittest.TestCase):\n INFO = \"something was logged\"\n LEVEL = \"CRITICAL\"\n DEFAULT_FORMATTER = DEFAULT_FORMATTER\n\n def setUp(self):\n self.basicEvent = Event(self.INFO, self.LEVEL)\n self.basicEvent.set_formatter(None)\n self.formatted_event = FormattedEvent(self.INFO, self.INFO)\n FormattedEvent.set_formatter(DEFAULT_FORMATTER)\n\n def test_has_dt(self):\n event = Event(self.INFO, self.LEVEL)\n self.assertIsNotNone(self.basicEvent.dt)\n\n def test_takes_arguments(self):\n given_dt = dt.datetime.now()\n event = Event(self.INFO, self.LEVEL, given_dt)\n self.assertEquals(event.dt, given_dt)\n self.assertEquals(event.info, self.INFO)\n self.assertEquals(event.level, self.LEVEL)\n self.assertEquals(event.formatter, None)\n\n def test_func_formatter(self):\n def quick_format(e):\n return \"({level})({text})\".format(level = e.level,\n text = e.info)\n\n FormattedEvent.set_formatter(quick_format)\n\n event = FormattedEvent(self.INFO, self.LEVEL)\n\n self.assertEquals(event.formatted(), \"({level})({text})\".format(\n level = event.level, text = event.info))\n\n self.assertEquals(str(event), \"({level})({text})\".format(\n level=event.level, text=event.info))\n\n event.info = \"\"\n\n self.assertEquals(event.formatted(), \"({level})({text})\".format(\n level = event.level, text = event.info))\n\n self.assertEquals(str(event), \"({level})({text})\".format(\n level=event.level, text=event.info))\n\n def test_str_formatter(self):\n event = FormattedEvent(self.INFO, self.LEVEL)\n\n self.assertEquals(event.formatted(), \"({level})({text})\".format(\n level=event.level, text=event.info))\n\n self.assertEquals(str(event), \"({level})({text})\".format(\n level=event.level, text=event.info))\n\n self.assertEquals(event.formatter, DEFAULT_FORMATTER)\n\n event.info = \"\"\n\n self.assertEquals(event.formatted(), \"({level})({text})\".format(\n level=event.level, text=event.info))\n\n self.assertEquals(str(event), \"({level})({text})\".format(\n level=event.level, text=event.info))\n\n def test_bad_formatter(self):\n formatter = 5\n with self.assertRaises(ValueError,\n msg=\"Could not identify formatter {}\".format(formatter)):\n event = FormattedEvent(self.INFO, self.LEVEL)\n event.set_formatter(formatter)\n\n def test_set_formatter(self):\n class MutableFormattedEvent(Event):\n pass\n event = 
MutableFormattedEvent(self.INFO, self.LEVEL)\n self.assertIsNone(event.formatter)\n with self.assertRaises(AttributeError,\n msg=\"Event instance has no attribute 'formatted'\"):\n event.formatted()\n\n event.set_formatter(\"({level})({info})\")\n self.assertEquals(event.formatted(), \"({level})({info})\".format(\n level=event.level, info=event.info))\n\n event.set_formatter(None)\n self.assertIsNone(event.formatter)\n\n def test_no_formatter(self):\n class NoFormattedEvent(Event):\n pass\n\n event = NoFormattedEvent(self.INFO, self.LEVEL)\n\n self.assertIsNone(self.basicEvent.formatter)\n with self.assertRaises(AttributeError,\n msg=\"{} does not have a valid formatter: {}\".format(\n self.basicEvent, self.basicEvent.formatter)\n ):\n self.basicEvent.formatted()\n\n self.assertEquals(str(self.basicEvent), repr(self.basicEvent))\n\n def test_init_set_formatter(self):\n class ToBeFormattedEvent(Event):\n pass\n\n event = ToBeFormattedEvent(self.INFO, self.LEVEL)\n with self.assertRaises(AttributeError,\n msg=\"{} does not have a valid formatter: {}\".format(\n self.basicEvent, self.basicEvent.formatter)\n ):\n event.formatted()\n event.formatter = \"{info}\"\n old_formatted = event.formatted\n self.assertEquals(self.INFO, event.formatted())\n self.assertIsNot(old_formatted, event.formatted)\n\n def test_formatted_preset(self):\n class GoodLookingEvent(Event):\n formatter = \"{info}::{level}\"\n\n event = GoodLookingEvent(self.INFO, self.LEVEL)\n first_format = event.formatted()\n second_format = event.formatted()\n self.assertEquals(first_format, \"{info}::{level}\".format(info=self.INFO,\n level=self.LEVEL))\n self.assertEquals(first_format, second_format)\n\n def test_set_level(self):\n mock_level = lambda: None\n event_to_change = Event(self.INFO, mock_level)\n self.assertIs(event_to_change.level, mock_level)\n self.assertEquals(event_to_change.level_str, mock_level.__name__.upper())\n\n new_level = lambda: None\n event_to_change.set_level(new_level)\n self.assertIs(event_to_change.level, new_level)\n self.assertEquals(event_to_change.level_str, new_level.__name__.upper())\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"SamGRosen/diary","sub_path":"tests/events_test.py","file_name":"events_test.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"16925923338","text":"\nwhile True :\n print('숫자의 공백을 주세요 예) 4 5')\n number = input('두 수를 입력하세요. (종료:프로그램종료):').split()\n if number[0] == '종료' :\n break\n if len(number) == 2 :\n try:\n a = int(number[0])\n b = int(number[1])\n except:\n try:\n number[0] == int(number[0])\n except:\n print('첫번째 입력이 %s 입니다.' % number[0])\n try:\n number[1] == int(number[1])\n except:\n print('두번째 입력이 %s 입니다.' % number[1])\n else:\n try:\n number[1] != 0\n print(a + b)\n print(a - b)\n print(a * b)\n print(a / b)\n except:\n print('죄송합니다. 두 번째 입력에서 0을 입력하셨습니다. 
분모는 0이 되어서는 안됩니다.')\n else:\n print('두 수를 입력하세요.')\n","repo_name":"itminha123/jumptopython","sub_path":"01.jumptopython/chap05/practice/chap5_practice_5.py","file_name":"chap5_practice_5.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18156767808","text":"import copy\nimport math\n\nimport cv2 as cv\nimport globalvars\nimport numpy as np\n\n\ndef foccal(pt, step, minar, variance):\n stepar = []\n opar = []\n\n varwin = variance * step / ((2 * math.pi) ** 0.5)\n for i in range(1, 12):\n x = minar + (i - 1) * step\n\n stepar.append(np.round(x, 2))\n func = (1 / (((2 * math.pi) ** 0.5) * varwin)) * math.exp(-(x - pt) ** 2 / (2 * (varwin ** 2)))\n func = np.round(func, 8)\n opar.append(func)\n foclist = opar\n gpar = []\n\n focar = np.round(np.max(foclist) - foclist)\n for i in range(0, 10):\n gpar.append(2 * int(focar[i]) + 1)\n\n return gpar\n\n\ndef newmaskimg(image, array, blurparam):\n step = np.round((np.max(array) - np.min(array)) / 10, 3)\n minar = np.min(array)\n\n stepar = []\n for i in range(1, 12):\n x = minar + (i - 1) * step\n stepar.append(x)\n\n brack = []\n for i in range(0, 10):\n brack.append([stepar[i], stepar[i + 1]])\n brack[9][1] = 1\n\n masks = []\n temp_table = np.zeros((globalvars.img.shape[0], globalvars.img.shape[1]))\n for i in range(0, 10):\n newarr = copy.copy(array)\n newarr[np.where(newarr > brack[i][1])] = 0\n newarr[np.where(newarr < brack[i][0])] = 0\n newarr[np.where(newarr > 0)] = 1\n\n resized = cv.resize(newarr, (globalvars.img.shape[1], globalvars.img.shape[0]), interpolation=cv.INTER_AREA)\n\n temp_table += i * resized[:, :, 0]\n if resized.shape[2] != 3:\n mask = np.dstack([resized, resized, resized])\n else:\n mask = resized\n\n masks.append(mask)\n\n imar = []\n for i in range(4):\n imar.append(cv.GaussianBlur(image, (blurparam * i + 1, blurparam * i + 1), 0))\n return imar, masks, temp_table\n\n\ndef renderopfast(blimg, blarray, image, masks):\n oparray = []\n for j in range(10):\n recons = np.zeros(image.shape)\n for i in range(10):\n if blarray[j][i] == 1:\n recons = recons + masks[i] * blimg[0]\n elif blarray[j][i] == 3:\n recons = recons + masks[i] * blimg[1]\n elif blarray[j][i] == 5:\n recons = recons + masks[i] * blimg[2]\n elif blarray[j][i] == 7:\n recons = recons + masks[i] * blimg[3]\n\n oparray.append(recons)\n\n return np.uint8(oparray)\n","repo_name":"nawaldua/Dynamic-Depth-of-Field-with-Eye-Tracking","sub_path":"code/blurproc.py","file_name":"blurproc.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21867953682","text":"# Collabrative Filtering data set\n\ndataset={\n 'Fadzil': {\n 'Superman Returns': 3.5,\n 'You, Me and AI': 2.5,\n 'Harry Potter': 5.0,\n 'Alice in Wonderland': 3.0},\n \n 'Shafiq': {\n 'Alice in Wonderland': 3.0,\n 'AI Note': 3.5,\n 'Machine Learning': 1.5,\n 'Introduction to AI': 3.0,\n 'You, Me and AI': 3.5},\n\n 'Azry': {\n 'Alice in Wonderland': 2.5,\n 'AI Note': 3.0,\n 'Superman Returns': 3.5,\n 'Harry Potter': 5.0,\n 'Introduction to AI': 4.0},\n \n 'Amin': {\n 'AI Note': 3.5,\n 'Machine Learning': 3.0,\n 'Introduction to AI': 4.5,\n 'Superman Returns': 4.0,\n 'You, Me and AI': 2.5},\n\n 'Sani': {\n 'Alice in Wonderland': 3.0,\n 'AI Note': 4.0,\n 'Machine Learning': 2.0,\n 'Superman Returns': 3.0,\n 'Introduction to AI': 3.0,\n 'You, Me and AI': 2.0},\n\n 'Arif': {\n 'Alice in Wonderland': 3.0,\n 'AI 
Note': 4.0,\n        'Introduction to AI': 3.0,\n        'Superman Returns': 5.0,\n        'You, Me and AI': 3.5},\n    \n    'Shahril': {\n        'AI Note':4.5,\n        'You, Me and AI':1.0,\n        'Superman Returns':4.0}}","repo_name":"fadziljusri/BookRecommendation-AI","sub_path":"recommendation_data.py","file_name":"recommendation_data.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24930182518","text":"# Uses python3\ndef evalt(a, b, op):\n    if op == '+':\n        return a + b\n    elif op == '-':\n        return a - b\n    elif op == '*':\n        return a * b\n    else:\n        assert False\n\n\ndef get_maximum_value(dataset):\n    \"\"\"\n    As described in the lectures, to get to the maximum value of the expression, we need the maximum and minimum value\n    of all possible sub-expressions. The recurrence relationship looks like:\n    min_[i][j] = min([min_[i][k] op_k min_[k+1][j], min_[i][k] op_k max_[k+1][j],\n                      max_[i][k] op_k min_[k+1][j], max_[i][k] op_k max_[k+1][j]] for all k in the range(i, j))\n    max_[i][j] = max([min_[i][k] op_k min_[k+1][j], min_[i][k] op_k max_[k+1][j],\n                      max_[i][k] op_k min_[k+1][j], max_[i][k] op_k max_[k+1][j]] for all k in the range(i, j))\n    :param dataset:\n    :return:\n    \"\"\"\n    # Getting the input dataset in the form of list of digits and list of operations\n    digits = list()\n    ops = list()\n    for i, symbol in enumerate(dataset):\n        if i % 2 == 0:\n            digits.append(int(symbol))\n        else:\n            ops.append(symbol)\n    n = len(digits)\n\n    # Initialising the maximum and minimum 2D matrices to all zeros\n    max_ = list()\n    min_ = list()\n    for i in range(n):\n        maxi_ = list()\n        mini_ = list()\n        for j in range(n):\n            maxi_.append(0)\n            mini_.append(0)\n        max_.append(maxi_)\n        min_.append(mini_)\n\n    # Going from difference j - i = 0 to n-1, compute the minimum and maximum possible values of the sub-expression\n    for i in range(n):\n        max_[i][i] = digits[i]\n        min_[i][i] = digits[i]\n\n    for diff in range(1, n):\n        for j in range(diff, n):\n            i = j - diff\n            minij = list()\n            maxij = list()\n            # i = 0, j = 1, k = 0\n            for k in range(i, j):\n                minik = min_[i][k]\n                minkj = min_[k+1][j]\n                maxik = max_[i][k]\n                maxkj = max_[k+1][j]\n                alevals = [evalt(minik, minkj, ops[k]), evalt(minik, maxkj, ops[k]),\n                           evalt(maxik, minkj, ops[k]), evalt(maxik, maxkj, ops[k])]\n                minij.append(min(alevals))\n                maxij.append(max(alevals))\n            min_[i][j] = min(minij)\n            max_[i][j] = max(maxij)\n    return max_[0][-1]\n\n\nif __name__ == \"__main__\":\n    print(get_maximum_value(input()))\n","repo_name":"aditigupta3/Data-Structures-and-Algorithms-Specialisation","sub_path":"Algorithmic Toolbox/week6_dynamic_programming2/3_maximum_value_of_an_arithmetic_expression/placing_parentheses.py","file_name":"placing_parentheses.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1809263988","text":"import pandas as pd\nimport re\nimport pickle\n\n\n\n\n\n\n\n\n\npattern_deconstruct = re.compile(\"~[a-zA-Z]+\\(\\)\") \npattern_point = re.compile(\"\\*[a-zA-Z]+\") # could also be multiplication\npattern_memory_address = re.compile(\"&[a-zA-Z]+\") # could also be a reference\npattern_func = re.compile(\"::\")\npattern_array = re.compile(\"[a-zA-Z]+[0-9]*\\[[0-9]*\\]\")\npattern_multiarray = re.compile(\"[a-zA-Z]+[0-9]*\\[[0-9]*\\]\\[[0-9]*\\]\")\npattern_datamember = re.compile(\"[A-Z][a-z0-9_]*.[a-z][a-zA-Z0-9_]*\")\npattern_pass_value_call = 
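# --- A hand-checked micro-example of the min/max recurrence in placing_parentheses.py
# --- above: for "5-8+7" the only parenthesizations are (5-8)+7 = 4 and 5-(8+7) = -10,
# --- so max_[0][-1] is 4 and min_[0][-1] is -10. A self-contained brute-force cross-check:
candidates = [(5 - 8) + 7, 5 - (8 + 7)]
assert max(candidates) == 4 and min(candidates) == -10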
re.compile(\"[a-zA-Z_][a-zA-Z0-9_]*\\([a-zA-Z_][a-zA-Z0-9_]*(,\\s*[a-zA-Z_][a-zA-Z0-9_]*)*\\)\")\npattern_variable_declaration1 = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-zA-Z_][a-zA-Z0-9_]*(,\\s*[a-zA-Z_][a-zA-Z0-9_]*)*;\")\npattern_variable_declaration2 = re.compile(\"extern\\s(int|float|bool|char|double|void|wchar_t)\\s[a-zA-Z_][a-zA-Z0-9_]*(,\\s*[a-zA-Z_][a-zA-Z0-9_]*)*;\")\npattern_object = re.compile(\"[A-Z][a-z0-9_]*\\s[A-Z][a-z0-9_]*(,\\s*[A-Z_][a-z0-9_]*)*;\")\npattern_function_declaration = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-zA-Z_][a-zA-Z0-9_]*\\([a-zA-Z_][a-zA-Z0-9_]*(,\\s*[a-zA-Z_][a-zA-Z0-9_]*)*\\)\")\npattern_base_class = re.compile(\"class\\s[A-Z][a-z0-9_]*:\\s*(public|private|protected)\\s[A-Z][a-z0-9_]*\")\npattern_string = re.compile(\"char\\s[a-z][a-zA-Z0-9_]*\\[[0-9]*\\]\")\npattern_variable_definitions = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*(,\\s*[a-z][a-zA-Z0-9_]*)*\")\npattern_function_definitions = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*\\((int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*(,\\s*(int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*)*\\)\")\npattern_return_type = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-zA-Z_][a-zA-Z0-9_]*\\([a-zA-Z_][a-zA-Z0-9_]*(,\\s*[a-zA-Z_][a-zA-Z0-9_]*)*\\)\")\npattern_return_type = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-zA-Z_][a-zA-Z0-9_]*\\([a-zA-Z_][a-zA-Z0-9_]*(,\\s*[a-zA-Z_][a-zA-Z0-9_]*)*\\)\")\npattern_array_as_parameter1 = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*\\((int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*\\[[0-9]*\\](,\\s*(int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*\\[[0-9]*\\])*\\)\")\npattern_array_as_parameter2 = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*\\((int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*\\[\\s*\\](,\\s*(int|float|bool|char|double|void|wchar_t)\\s[a-z][a-zA-Z0-9_]*\\[[0-9]*\\])*\\)\")\npattern_datamember = re.compile(\"[A-Z][a-z0-9_]*.[a-z][a-zA-Z0-9_]*\")\npattern_pointer_variable_declaration = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s\\*[a-z][a-zA-Z0-9_]*\")\npattern_pointer_array = re.compile(\"\\*[a-zA-Z][a-zA-Z0-9_]*\\[[a-zA-Z][a-zA-Z0-9_]*\\]\")\npattern_reference_statement = re.compile(\"(int|float|bool|char|double|void|wchar_t)&\\s*[a-zA-Z][a-zA-Z0-9_]*\")\npattern_pointer_to_structure = re.compile(\"struct\\s[a-zA-Z][a-zA-Z0-9_]*\\s\\*[a-zA-Z][a-zA-Z0-9_]*\")\npattern_string = re.compile(\"string\\s*\\*[a-zA-Z][a-zA-Z0-9_]*\\s*=\\s*(\\\"|\\').*(\\\"|\\')\")\npattern_derived_class = re.compile(\"class\\s*[A-Z][a-zA-Z0-9_]*\\s*:\\s*(public|private|protected)\\s*[A-Z][a-zA-Z0-9_]*\")\npattern_pure_virtual_function = re.compile(\"virtual\\s*(int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z][a-zA-Z0-9_]*\\(\\s*\\)\")\npattern_hexadecimal = re.compile(\"(0x)|(0X)[a-fA-F0-9]*\")\npattern_octal = re.compile(\"0[0-7]*\")\npattern_decimal = re.compile(\"[0-9]*\")\npattern_float_number = re.compile(\"[0-9][0-9]*.[0-9][0-9]*\")\npattern_float_e = re.compile(\"[0-9][0-9]*.[0-9][0-9]*e\")\npattern_float_E = re.compile(\"[0-9][0-9]*.[0-9][0-9]*E\")\npattern_str = 
re.compile(\"(\\\"|\\')[a-zA-Z0-9_]*\\s*(,|\\.|;|!|@|#|\\$|%|\\^|&|\\*|-|\\+|`|\\[|\\]|\\{|\\}|:|<|>|\\?)*\\s*[a-zA-Z0-9_]*(,|\\.|;|!|@|#|\\$|%|\\^|&|\\*|-|\\+|`|\\[|\\]|\\{|\\}|:|<|>|\\?)*\\s*(\\\"|\\')\")\npattern_miscellaneous_operator_choose = re.compile(\"[a-zA-Z0-9_]*\\s*(\\+|-|\\*|/|=|<|>)*\\s*[a-zA-Z0-9_]*\\s*(\\+|-|\\*|/|=|<|>)*\\s*[a-zA-Z0-9_]*\\s*\\?\\s*[a-zA-Z0-9_]*\\s*(\\+|-|\\*|/)*\\s*[a-zA-Z0-9_]*\\s*:\\s*[a-zA-Z0-9_]*(\\+|-|\\*|/)*[a-zA-Z0-9_]*\")\npattern_miscellaneous_operator_comma = re.compile(\"\\([a-zA-Z0-9_]*\\s*(\\+|-|\\*|\\\\|=|<|>|)*\\s*[a-zA-Z0-9_]*\\s*,(\\s*[a-zA-Z0-9_]*\\s*(\\+|-|\\*|\\\\|=|<|>|)*\\s*[a-zA-Z0-9_]*)*\\s*,\\s*[a-zA-Z0-9_]*\\s*(\\+|-|\\*|\\\\|=|<|>|)*\\s*[a-zA-Z0-9_]*\\s*\\)\")\npattern_miscellaneous_data_type_cast = re.compile(\"\\((int|float|bool|char|double|void|wchar_t)\\)\")\npattern_do_while = re.compile(\"\\s*do+\\s\\s*\")\npattern_if_else = re.compile(\"\\s*else+\\s\\s*\")\npattern_formal_parameter = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s[a-zA-Z_][a-zA-Z0-9_]*\\([a-zA-Z_][a-zA-Z0-9_]*(,\\s*[a-zA-Z_][a-zA-Z0-9_]*)*\\)\")\npattern_actual_parameter1 = re.compile(\"[a-zA-Z_][a-zA-Z0-9_]*\\([a-zA-Z_][a-zA-Z0-9_]*(,\\s*[a-zA-Z_][a-zA-Z0-9_]*)*\\)\")\npattern_actual_parameter2 = re.compile(\"[a-zA-Z_][a-zA-Z0-9_]*\\s*\\((int|float|bool|char|double|void|wchar_t)\\s*\\*[a-zA-Z][a-zA-Z0-9_]*\\s*(,\\s*(int|float|bool|char|double|void|wchar_t)\\s*\\*[a-zA-Z][a-zA-Z0-9_]*)*\\)\")\npattern_pointer_call = re.compile(\"[a-zA-Z_][a-zA-Z0-9_]*\\s*\\((int|float|bool|char|double|void|wchar_t)\\s*\\*[a-zA-Z][a-zA-Z0-9_]*\\s*(,\\s*(int|float|bool|char|double|void|wchar_t)\\s*\\*[a-zA-Z][a-zA-Z0-9_]*)*\\)\")\npattern_reference_call = re.compile(\"[a-zA-Z_][a-zA-Z0-9_]*\\s*\\((int|float|bool|char|double|void|wchar_t)\\s*&[a-zA-Z][a-zA-Z0-9_]*\\s*(,\\s*(int|float|bool|char|double|void|wchar_t)\\s*&[a-zA-Z][a-zA-Z0-9_]*)*\\)\")\npattern_cos1 = re.compile(\"cos\\((int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_cos2 = re.compile(\"cos\\([a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_sin1 = re.compile(\"sin\\((int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_sin2 = re.compile(\"sin\\([a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_tan1 = re.compile(\"tan\\((int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_tan2 = re.compile(\"tan\\([a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_log1 = re.compile(\"log\\([a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_log2 = re.compile(\"log10\\([a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_pow = re.compile(\"pow\\((([0-9][0-9]*\\.[0-9]*)|([0-9][0-9]*)|([a-zA-Z_][a-zA-Z0-9_]*)),\\s*(([0-9]*)|([a-zA-Z_][a-zA-Z0-9_]*))\\)\")\npattern_sqrt1 = re.compile(\"sqrt\\((int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_sqrt2 = re.compile(\"sqrt\\([a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_array_declaration = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*\\[[0-9][0-9]*\\];\")\npattern_initialize_array = re.compile(\"[a-zA-Z_][a-zA-Z0-9_]*\\[\\s*\\]\\s*=\")\npattern_null_pointer = re.compile(\"\\*[a-zA-Z_][a-zA-Z0-9_]*\\s*=\\s*NULL\")\npattern_pointer_to_pointer = re.compile(\"\\*\\*[a-zA-Z_][a-zA-Z0-9_]*\")\npattern_pass_pointer_to_function = re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*\\((int|float|bool|char|double|void|wchar_t)\\s*\\*[a-zA-Z_][a-zA-Z0-9_]*\\)\")\npattern_return_pointer_from_function = 
re.compile(\"(int|float|bool|char|double|void|wchar_t)\\s*\\*\\s*[a-zA-Z_][a-zA-Z0-9_]*\\(\\s*\\)\")\npattern_public_inheritance = re.compile(\"class\\s*[A-Z][a-zA-Z0-9_]*\\s*:\\s*public\\s*[A-Z][a-zA-Z0-9_]*\")\npattern_protected_inheritance = re.compile(\"class\\s*[A-Z][a-zA-Z0-9_]*\\s*:\\s*protected\\s*[A-Z][a-zA-Z0-9_]*\")\npattern_private_inheritance = re.compile(\"class\\s*[A-Z][a-zA-Z0-9_]*\\s*:\\s*private\\s*[A-Z][a-zA-Z0-9_]*\")\npattern_public_member = re.compile(\"class\\s*[A-Z][a-zA-Z0-9_]*\\s*\\n\\n*\\{\\s*\\n\\n*\\s*public:\\s*\\n\\n*\\s*(int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*;\")\npattern_private_member = re.compile(\"class\\s*[A-Z][a-zA-Z0-9_]*\\s*\\n\\n*\\{\\s*\\n\\n*\\s*private:\\s*\\n\\n*\\s*(int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*;\")\npattern_protected_member = re.compile(\"class\\s*[A-Z][a-zA-Z0-9_]*\\s*\\n\\n*\\{\\s*\\n\\n*\\s*protected:\\s*\\n\\n*\\s*(int|float|bool|char|double|void|wchar_t)\\s*[a-zA-Z_][a-zA-Z0-9_]*;\")\n\n# comment\npattern_intro1 = re.compile(\"//\")\npattern_intro2 = re.compile(\"/\\*\\S*\\*/\")\n\n\ndef check_pattern(pattern, str, knowledge, rule_name, rule):\n res = pattern.match(str)\n if res is not None:\n knowledge[rule.index(rule_name)] = 1\n\ndef rule_match(str, rule, knowledge):\n\n if \"struct\" in str:\n knowledge[rule.index(\"['结构']\")]=1\n if \"class\" in str:\n knowledge[rule.index(\"['类']\")]=1\n if \"#include\" in str:\n knowledge[rule.index(\"['预处理指令#include']\")]=1\n if \"inline\" in str:\n knowledge[rule.index(\"['内置函数']\")]=1\n if \"iostream\" in str:\n knowledge[rule.index(\"['iostream']\")]=1\n if \"iomanip\" in str:\n knowledge[rule.index(\"['iomanip']\")]=1\n if \"namespace\" in str:\n knowledge[rule.index(\"['命名空间']\")]=1\n if \"char\" in str:\n knowledge[rule.index(\"['字符串']\")] = 1\n if \"string\" in str:\n knowledge[rule.index(\"['String串']\")] = 1\n if \"friend\" in str:\n knowledge[rule.index(\"['友元函数']\")] = 1\n if \"this->\" in str:\n knowledge[rule.index(\"['this指针']\")] = 1\n if \"operator\" in str:\n knowledge[rule.index(\"['运算符重载']\")] = 1\n if \"bool\" in str:\n knowledge[rule.index(\"['布尔型']\")] = 1\n if \"int\" in str:\n knowledge[rule.index(\"['整型']\")] = 1\n if \"float\" in str:\n knowledge[rule.index(\"['浮点型']\")] = 1\n if \"double\" in str:\n knowledge[rule.index(\"['双浮点型']\")] = 1\n if \"void\" in str:\n knowledge[rule.index(\"['无类型']\")] = 1\n if \"wchar_t\" in str:\n knowledge[rule.index(\"['宽字符串']\")] = 1\n if \"virtual\" in str:\n knowledge[rule.index(\"['虚函数']\")] = 1\n if \"true\" in str:\n knowledge[rule.index(\"['TRUE']\")] = 1\n if \"false\" in str:\n knowledge[rule.index(\"['FALSE']\")] = 1\n if \"\\a\" in str:\n knowledge[rule.index(\"['警报铃声']\")]=1\n if \"\\b\" in str:\n knowledge[rule.index(\"['退格键']\")]=1\n if \"\\f\" in str:\n knowledge[rule.index(\"['换页符']\")]=1\n if \"\\n\" in str:\n knowledge[rule.index(\"['换行符']\")]=1\n if \"\\r\" in str:\n knowledge[rule.index(\"['回车']\")]=1\n if \"\\t\" in str:\n knowledge[rule.index(\"['水平制表符']\")]=1\n if \"\\v\" in str:\n knowledge[rule.index(\"['垂直制表符']\")]=1\n if \"const\" in str:\n knowledge[rule.index(\"['const']\")] = 1\n if \"#define\" in str:\n knowledge[rule.index(\"['#define']\")] = 1\n if \"signed\" in str:\n knowledge[rule.index(\"['有符号型']\")] = 1\n if \"unsigned\" in str:\n knowledge[rule.index(\"['无符号型']\")] = 1\n if \"long\" in str:\n knowledge[rule.index(\"['长型']\")] = 1\n if \"short\" in str:\n knowledge[rule.index(\"['短型']\")] = 1\n if \"+\" in str:\n 
knowledge[rule.index(\"['算术运算符']\")] = 1\n    if \"-\" in str:\n        knowledge[rule.index(\"['算术运算符']\")] = 1\n    if \"*\" in str:\n        knowledge[rule.index(\"['算术运算符']\")] = 1\n    if \"/\" in str:\n        knowledge[rule.index(\"['算术运算符']\")] = 1\n    if \"%\" in str:\n        knowledge[rule.index(\"['算术运算符']\")] = 1\n    if \"++\" in str:\n        knowledge[rule.index(\"['算术运算符']\")] = 1\n    if \"--\" in str:\n        knowledge[rule.index(\"['算术运算符']\")] = 1\n    if \"==\" in str:\n        knowledge[rule.index(\"['关系运算符']\")] = 1\n    if \"!=\" in str:\n        knowledge[rule.index(\"['关系运算符']\")] = 1\n    if \"<\" in str:\n        knowledge[rule.index(\"['关系运算符']\")] = 1\n    if \">\" in str:\n        knowledge[rule.index(\"['关系运算符']\")] = 1\n    if \"<=\" in str:\n        knowledge[rule.index(\"['关系运算符']\")] = 1\n    if \">=\" in str:\n        knowledge[rule.index(\"['关系运算符']\")] = 1\n    if \"&&\" in str:\n        knowledge[rule.index(\"['逻辑运算符']\")] = 1\n    if \"||\" in str:\n        knowledge[rule.index(\"['逻辑运算符']\")] = 1\n    if \"!\" in str:\n        knowledge[rule.index(\"['逻辑运算符']\")] = 1\n    if \"&\" in str:\n        knowledge[rule.index(\"['位运算符']\")] = 1\n    if \"|\" in str:\n        knowledge[rule.index(\"['位运算符']\")] = 1\n    if \"^\" in str:\n        knowledge[rule.index(\"['位运算符']\")] = 1\n    if \"~\" in str:\n        knowledge[rule.index(\"['位运算符']\")] = 1\n    if \"<<\" in str:\n        knowledge[rule.index(\"['位运算符']\")] = 1\n    if \">>\" in str:\n        knowledge[rule.index(\"['位运算符']\")] = 1\n    if \"=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"+=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"-=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"*=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"/=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"%=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"<<=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \">>=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"&=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"|=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"^=\" in str:\n        knowledge[rule.index(\"['赋值运算符']\")] = 1\n    if \"sizeof\" in str:\n        knowledge[rule.index(\"['杂项运算符']\")] = 1\n    if \"auto\" in str:\n        knowledge[rule.index(\"['auto存储类']\")] = 1\n    if \"register\" in str:\n        knowledge[rule.index(\"['register存储类']\")] = 1\n    if \"static\" in str:\n        knowledge[rule.index(\"['static存储类']\")] = 1\n    if \"extern\" in str:\n        knowledge[rule.index(\"['extern存储类']\")] = 1\n    if \"mutable\" in str:\n        knowledge[rule.index(\"['mutable存储类']\")] = 1\n    if \"thread_local\" in str:\n        knowledge[rule.index(\"['thread_local存储类']\")] = 1\n    if \"while\" in str:\n        knowledge[rule.index(\"['while循环']\")] = 1\n    if \"for\" in str:\n        knowledge[rule.index(\"['for循环']\")] = 1\n    if \"break\" in str:\n        knowledge[rule.index(\"['break语句']\")] = 1\n    if \"continue\" in str:\n        knowledge[rule.index(\"['continue语句']\")] = 1\n    if \"goto\" in str:\n        knowledge[rule.index(\"['goto语句']\")] = 1\n    if \"if\" in str:\n        knowledge[rule.index(\"['if语句']\")] = 1\n    if \"switch\" in str:\n        knowledge[rule.index(\"['switch语句']\")] = 1\n    if \"main\" in str:\n        knowledge[rule.index(\"['主函数main()']\")] = 1\n    if \"srand\" in str:\n        knowledge[rule.index(\"['随机数函数']\")] = 1\n    if \"rand\" in str:\n        knowledge[rule.index(\"['随机数函数']\")] = 1\n    if \"strcpy\" in str:\n        knowledge[rule.index(\"['复制字符串s2到字符串s1函数']\")] = 1\n    if \"strcat\" in str:\n        knowledge[rule.index(\"['连接字符串s2到字符串s1的末尾函数']\")] = 1\n    if \"strlen\" in str:\n        knowledge[rule.index(\"['返回字符串s1的长度函数']\")] = 1\n    if \"strcmp\" in str:\n        knowledge[rule.index(\"['比较字符串s1与s2的长度并返回相应值的函数']\")] = 1\n    if \"strchr\" in 
str:\n knowledge[rule.index(\"['返回指针并指向s1中字符ch第一次出现的位置函数']\")] = 1\n if \"strstr\" in str:\n knowledge[rule.index(\"['返回指针并指向s1中字符串s2第一次出现的位置函数']\")] = 1\n if \"fstream\" in str:\n knowledge[rule.index(\"['fstream']\")] = 1\n if \"cin\" in str:\n knowledge[rule.index(\"['标准输入流cin']\")] = 1\n if \"cout\" in str:\n knowledge[rule.index(\"['标准输出流cout']\")] = 1\n if \"cerr\" in str:\n knowledge[rule.index(\"['cerr']\")] = 1\n if \"clog\" in str:\n knowledge[rule.index(\"['clog']\")] = 1\n if \"setw\" in str:\n knowledge[rule.index(\"['setw']\")] = 1\n if \"setprecision\" in str:\n knowledge[rule.index(\"['setprecision']\")] = 1\n if \"using\" in str:\n knowledge[rule.index(\"['using指令']\")] = 1\n if \"printf\" in str:\n knowledge[rule.index(\"['c++库函数printf()']\")] = 1\n if \"scanf\" in str:\n knowledge[rule.index(\"['c++库函数scanf()']\")] = 1\n if \"enum\" in str:\n knowledge[rule.index(\"['枚举类型']\")] = 1\n\n check_pattern(pattern_deconstruct, str, knowledge, \"['构造器']\", rule)\n check_pattern(pattern_point, str, knowledge, \"['指针']\", rule)\n check_pattern(pattern_memory_address, str, knowledge, \"['内存地址']\", rule)\n check_pattern(pattern_func, str, knowledge, \"['类成员函数']\", rule)\n check_pattern(pattern_array, str, knowledge, \"['访问数组元素']\", rule)\n check_pattern(pattern_multiarray, str, knowledge, \"['多维数组']\", rule)\n check_pattern(pattern_datamember, str, knowledge, \"['数据成员']\", rule)\n check_pattern(pattern_pass_value_call, str, knowledge, \"['传值调用']\", rule)\n check_pattern(pattern_variable_declaration1, str, knowledge, \"['变量声明']\", rule)\n check_pattern(pattern_variable_declaration2, str, knowledge, \"['变量声明']\", rule)\n check_pattern(pattern_object, str, knowledge, \"['对象']\", rule)\n check_pattern(pattern_function_declaration, str, knowledge, \"['函数声明']\", rule)\n check_pattern(pattern_base_class, str, knowledge, \"['基类']\", rule)\n check_pattern(pattern_string, str, knowledge, \"['字符串']\", rule)\n check_pattern(pattern_variable_definitions, str, knowledge, \"['变量定义']\", rule)\n check_pattern(pattern_function_definitions, str, knowledge, \"['变量定义']\", rule)\n check_pattern(pattern_return_type, str, knowledge, \"['返回类型']\", rule)\n check_pattern(pattern_array_as_parameter1, str, knowledge, \"['数组作为参数']\", rule)\n check_pattern(pattern_array_as_parameter2, str, knowledge, \"['数组作为参数']\", rule)\n check_pattern(pattern_pointer_variable_declaration, str, knowledge, \"['指针变量的声明']\", rule)\n check_pattern(pattern_pointer_array, str, knowledge, \"['指针数组']\", rule)\n check_pattern(pattern_reference_statement, str, knowledge, \"['引用声明']\", rule)\n check_pattern(pattern_pointer_to_structure, str, knowledge, \"['指向结构的指针']\", rule)\n check_pattern(pattern_string, str, knowledge, \"['String串']\", rule)\n check_pattern(pattern_derived_class, str, knowledge, \"['派生类'']\", rule)\n check_pattern(pattern_pure_virtual_function, str, knowledge, \"['纯虚函数']\", rule)\n check_pattern(pattern_hexadecimal, str, knowledge, \"['整数常量-十六进制']\", rule)\n check_pattern(pattern_octal, str, knowledge, \"['整数常量-八进制']\", rule)\n check_pattern(pattern_decimal, str, knowledge, \"['整数常量-十进制']\", rule)\n check_pattern(pattern_float_number, str, knowledge, \"['浮点常量-小数]\", rule)\n check_pattern(pattern_float_e, str, knowledge, \"['浮点常量-e表示小数']\", rule)\n check_pattern(pattern_float_E, str, knowledge, \"['浮点常量-e表示小数]\", rule)\n check_pattern(pattern_str, str, knowledge, \"['字符常量']\", rule)\n check_pattern(pattern_miscellaneous_operator_choose, str, knowledge, \"['杂项运算符-选择运算']\", rule)\n 
check_pattern(pattern_miscellaneous_operator_comma, str, knowledge, \"['杂项运算符-逗号运算']\", rule)\n check_pattern(pattern_miscellaneous_data_type_cast, str, knowledge, \"['杂项运算符-强制转换数据类型']\", rule)\n check_pattern(pattern_do_while, str, knowledge, \"['do-while循环']\", rule)\n check_pattern(pattern_if_else, str, knowledge, \"['if-else循环']\", rule)\n check_pattern(pattern_formal_parameter, str, knowledge, \"['形式参数']\", rule)\n check_pattern(pattern_actual_parameter1, str, knowledge, \"['实际参数']\", rule)\n check_pattern(pattern_actual_parameter2, str, knowledge, \"['实际参数']\", rule)\n check_pattern(pattern_pointer_call, str, knowledge, \"['指针调用']\", rule)\n check_pattern(pattern_reference_call, str, knowledge, \"['引用调用']\", rule)\n check_pattern(pattern_cos1, str, knowledge, \"['余弦函数']\", rule)\n check_pattern(pattern_cos2, str, knowledge, \"['余弦函数']\", rule)\n check_pattern(pattern_sin1, str, knowledge, \"['正弦函数']\", rule)\n check_pattern(pattern_sin2, str, knowledge, \"['正弦函数']\", rule)\n check_pattern(pattern_tan1, str, knowledge, \"['正切函数']\", rule)\n check_pattern(pattern_tan2, str, knowledge, \"['正切函数']\", rule)\n check_pattern(pattern_log1, str, knowledge, \"['对数函数']\", rule)\n check_pattern(pattern_log2, str, knowledge, \"['对数函数']\", rule)\n check_pattern(pattern_pow, str, knowledge, \"['指数函数']\", rule)\n check_pattern(pattern_sqrt1, str, knowledge, \"['平方根函数']\", rule)\n check_pattern(pattern_sqrt2, str, knowledge, \"['平方根函数']\", rule)\n check_pattern(pattern_array_declaration, str, knowledge, \"['数组声明']\", rule)\n check_pattern(pattern_initialize_array, str, knowledge, \"['初始化数组']\", rule)\n check_pattern(pattern_null_pointer, str, knowledge, \"['空指针']\", rule)\n check_pattern(pattern_pointer_to_pointer, str, knowledge, \"['指向指针的指针']\", rule)\n check_pattern(pattern_pass_pointer_to_function, str, knowledge, \"['传递指针给函数']\", rule)\n check_pattern(pattern_return_pointer_from_function, str, knowledge, \"['从函数返回指针']\", rule)\n check_pattern(pattern_public_inheritance, str, knowledge, \"['公有继承']\", rule)\n check_pattern(pattern_protected_inheritance, str, knowledge, \"['保护继承']\", rule)\n check_pattern(pattern_private_inheritance, str, knowledge, \"['私有继承']\", rule)\n check_pattern(pattern_public_member, str, knowledge, \"['公有成员']\", rule)\n check_pattern(pattern_private_member, str, knowledge, \"['私有成员']\", rule)\n check_pattern(pattern_protected_member, str, knowledge, \"['私有成员']\", rule)\n\n return knowledge\n\n\nif __name__ == '__main__':\n df_answer = pd.read_csv(\"./challenge_answer.csv\")\n with open(\"./code_knowledge.pkl\",\"rb\") as file:\n code_knowledge = pickle.load(file)[0:181]\n result_rule = []\n\n for i, row in df_answer.iterrows():\n knowledge = [0]*181\n if isinstance(row[\"contents\"], str):\n content_array = row[\"contents\"].split(\"\\n\")\n tmpstr = ''\n for content_str in content_array:\n content_str = content_str.strip(\" \")\n intro = pattern_intro1.match(content_str)\n if intro is not None:\n if intro.pos == 0:\n continue\n else:\n content_str = content_str[:intro.pos]\n if len(content_str) == 0:\n continue\n tmpstr = tmpstr+content_str\n reslist = pattern_intro2.findall(tmpstr)\n if len(reslist) != 0:\n for res in reslist:\n tmpstr = tmpstr.replace(res,\"\")\n knowledge = rule_match(tmpstr, code_knowledge, knowledge)\n\n result_rule.append(knowledge)\n with open(\"./code_rule.pkl\",\"wb\") as file:\n pickle.dump(result_rule, file)\n","repo_name":"Eleanorbai/my-experiment","sub_path":"double-layer network/collaboration 
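# --- One subtlety in check_pattern() above worth flagging: re.match() anchors at
# --- position 0, so a rule only fires when the construct happens to start the scanned
# --- text; re.search() would scan the whole line. A self-contained demonstration
# --- using the file's own pow pattern:
import re

pattern_pow = re.compile(r"pow\((([0-9][0-9]*\.[0-9]*)|([0-9][0-9]*)|([a-zA-Z_][a-zA-Z0-9_]*)),\s*(([0-9]*)|([a-zA-Z_][a-zA-Z0-9_]*))\)")
text = "double y = pow(x, 3);"
print(pattern_pow.match(text))   # None: match() only tries at the start of the string
print(pattern_pow.search(text))  # a Match object: search() scans the whole text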
layer/data/raw_patternl.py","file_name":"raw_patternl.py","file_ext":"py","file_size_in_byte":22938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1250115209","text":"#!/usr/bin/python3\n\"\"\"Base Module definition\"\"\"\n\n\nclass Base:\n    \"\"\"Defines a Class called Base for other classes to inherit from\n    Args:\n        id (int): integer\n        __nb_objects (int): private class attribute\n    \"\"\"\n    __nb_objects = 0\n\n    def __init__(self, id=None):\n        \"\"\"initialises the Base class\"\"\"\n        if id is not None:\n            self.id = id\n        else:\n            Base.__nb_objects += 1\n            self.id = Base.__nb_objects\n","repo_name":"LowellUfot/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36741386096","text":"import torch\nimport torch.nn.functional as F\nimport numpy as np\nimport Utils.utils as utils\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n\ncos_similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)\n\ndef class_balanced_cross_entropy_loss(output, label, size_average=True, batch_average=True, void_pixels=None):\n    \"\"\"Define the class balanced cross entropy loss to train the network\n    Args:\n    output: Output of the network\n    label: Ground truth label\n    size_average: return per-element (pixel) average loss\n    batch_average: return per-batch average loss\n    void_pixels: pixels to ignore from the loss\n    Returns:\n    Tensor that evaluates the loss\n    \"\"\"\n\n    assert(output.size() == label.size())\n\n    labels = torch.ge(label, 0.5).float()\n\n    num_labels_pos = torch.sum(labels)\n    num_labels_neg = torch.sum(1.0 - labels)\n    num_total = num_labels_pos + num_labels_neg\n\n    output_gt_zero = torch.ge(output, 0).float()\n    loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(\n        1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))\n\n    loss_pos_pix = -torch.mul(labels, loss_val)\n    loss_neg_pix = -torch.mul(1.0 - labels, loss_val)\n\n    if void_pixels is not None:\n        w_void = torch.le(void_pixels, 0.5).float()\n        loss_pos_pix = torch.mul(w_void, loss_pos_pix)\n        loss_neg_pix = torch.mul(w_void, loss_neg_pix)\n        num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()\n\n    loss_pos = torch.sum(loss_pos_pix)\n    loss_neg = torch.sum(loss_neg_pix)\n\n    final_loss = num_labels_neg / num_total * loss_pos + num_labels_pos / num_total * loss_neg\n\n    if size_average:\n        final_loss /= np.prod(label.size())\n    elif batch_average:\n        final_loss /= label.size()[0]\n\n    return final_loss\n\ndef fp_edge_loss(gt_edges, edge_logits):\n    \"\"\"\n    Edge loss in the first point network\n\n    gt_edges: [batch_size, grid_size, grid_size] of 0/1\n    edge_logits: [batch_size, grid_size*grid_size]\n    \"\"\"\n    edges_shape = gt_edges.size()\n    gt_edges = gt_edges.view(edges_shape[0], -1)\n\n    loss = F.binary_cross_entropy_with_logits(edge_logits, gt_edges)\n\n    return torch.mean(loss)\n\ndef fp_vertex_loss(gt_verts, vertex_logits):\n    \"\"\"\n    Vertex loss in the first point network\n    \n    gt_verts: [batch_size, grid_size, grid_size] of 0/1\n    vertex_logits: [batch_size, grid_size**2]\n    \"\"\"\n    verts_shape = gt_verts.size()\n    gt_verts = gt_verts.view(verts_shape[0], -1)\n\n    loss = F.binary_cross_entropy_with_logits(vertex_logits, gt_verts)\n\n    return torch.mean(loss)\n\n\n\ndef poly_mathcing_loss(pnum, pred, gt, loss_type=\"L2\"):\n    '''\n    :param pnum: the number 
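# --- A quick usage sketch for the fixed counter logic in base.py above (assumes the
# --- Base class is in scope; values follow directly from the auto-increment behavior):
b1, b2, b3 = Base(), Base(), Base(89)
assert (b1.id, b2.id, b3.id) == (1, 2, 89)  # an explicit id leaves the counter untouched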
of spline vertices\n :param pred: [bs, pnum, 2 ] \\in (0,1)\n :param gt: [bs, pnum, 2 ] \\in (0,1)\n :param loss_type:\n :return:\n '''\n\n batch_size = pred.size()[0]\n pidxall = np.zeros(shape=(batch_size, pnum, pnum), dtype=np.int32)\n for b in range(batch_size):\n for i in range(pnum):\n pidx = (np.arange(pnum) + i) % pnum\n pidxall[b, i] = pidx\n\n pidxall = torch.from_numpy(np.reshape(pidxall, newshape=(batch_size, -1))).to(device)\n\n # import ipdb;\n # ipdb.set_trace()\n feature_id = pidxall.unsqueeze_(2).long().expand(pidxall.size(0), pidxall.size(1), gt.size(2)).detach()\n gt_expand = torch.gather(gt, 1, feature_id).view(batch_size, pnum, pnum, 2)\n\n pred_expand = pred.unsqueeze(1)\n\n dis = pred_expand - gt_expand ##return the distance from pred_expand to gt_expand\n\n\n if loss_type == \"L2\":\n dis = (dis ** 2).sum(3).sqrt().sum(2)\n elif loss_type == \"L1\":\n dis = torch.abs(dis).sum(3).sum(2)\n\n min_dis, min_id = torch.min(dis, dim=1, keepdim=True)\n min_id = torch.from_numpy(min_id.data.cpu().numpy()).to(device)\n\n min_gt_id_to_gather = min_id.unsqueeze_(2).unsqueeze_(3).long().\\\n expand(min_id.size(0), min_id.size(1), gt_expand.size(2), gt_expand.size(3))\n gt_right_order = torch.gather(gt_expand, 1, min_gt_id_to_gather).view(batch_size, pnum, 2)\n\n return gt_right_order, torch.mean(min_dis)\n\ndef poly_match_interactive(pnum, pred, gt, loss_type=\"L2\"):\n '''\n :param pnum: the number of spline vertices\n :param pred: [bs, pnum, 2 ] \\in (0,1)\n :param gt: [bs, pnum, 2 ] \\in (0,1)\n :param loss_type:\n :return:\n '''\n\n batch_size = pred.size()[0]\n pidxall = np.zeros(shape=(batch_size, pnum, pnum), dtype=np.int32)\n for b in range(batch_size):\n for i in range(pnum):\n pidx = (np.arange(pnum) + i) % pnum\n pidxall[b, i] = pidx\n\n pidxall = torch.from_numpy(np.reshape(pidxall, newshape=(batch_size, -1))).to(device)\n\n # import ipdb;\n # ipdb.set_trace()\n feature_id = pidxall.unsqueeze_(2).long().expand(pidxall.size(0), pidxall.size(1), gt.size(2)).detach() #(bs, pnum*pnum, 2)\n gt_expand = torch.gather(gt, 1, feature_id).view(batch_size, pnum, pnum, 2) #(bs, pnum,pnum,2)\n\n pred_expand = pred.unsqueeze(1) #(bs,1,pnum,2)\n\n dis = pred_expand - gt_expand # [bs,pnum,pnum,2] ##return the distance from pred_expand to gt_expand\n\n\n if loss_type == \"L2\":\n dis = (dis ** 2).sum(3).sqrt().sum(2)\n elif loss_type == \"L1\":\n ## note: bellow code calc the sum of each match, and calc the sum of match\n # pre is stable, gt is looped\n dis_match = torch.abs(dis).sum(3) # (2, 40, 40)\n dis = torch.abs(dis).sum(3).sum(2) # (bs,40)\n\n min_dis, min_id = torch.min(dis, dim=1, keepdim=True)\n shortest_match = torch.zeros(gt.shape[0], 1, pnum)\n # shortest_match = dis_match[:,min_id,:]\n for i in range(gt.shape[0]):\n shortest_match[i,0,:] = dis_match[i,min_id[i,0], :] # (bs,1, pnum)\n final_max_dis, final_max_dis_id = torch.max(shortest_match, dim=2) # find the \"minmax\" distance, final_max_dis:(bs,1), final_max_dis_id:(bs,1)\n final_match = torch.zeros(gt.shape[0], 1, pnum, 2)\n for i in range(gt.shape[0]):\n final_match[i,0,:,:] = gt_expand[i,min_id[i,0],:,:]\n\n\n return final_match, final_max_dis_id #final_match: (bs, 1, pnum, 2) ; final_max_dis_id: (bs, 1)\n\ndef GeneralizedDice(probs, onehot_labels):\n '''\n :param probs: b2wh, the probs of last CNN layer input to F.log_softmax()\n :param onehot_labels: one-hot operation labels\n :return:\n '''\n #assert utils.checkSimplex_(probs) and utils.checkSimplex_(onehot_labels)\n idc = [0, 1]\n #pc = probs[:, idc, 
...].type(torch.float32) #pc: bcwh\n    pc = probs.type(torch.float32)\n    #tc = onehot_labels[:, idc, ...].type(torch.float32)\n    tc = onehot_labels #convert ndarray to Tensor\n    pc_ = torch.zeros(pc.shape[0], pc.shape[1]).cuda() # bc\n    tc_ = torch.zeros(tc.shape[0], tc.shape[1]).cuda() #bc\n    ## intersection = torch.einsum('bcwh, bcwh -> bc', [pc, tc])\n    temp = torch.mul(tc, pc) #bcwh elementwise product (F.mul does not exist; torch.mul does)\n    intersection_ = torch.zeros(pc.shape[0], pc.shape[1]).cuda()\n    ## the loop below is equivalent to\n    ## pc_= torch.einsum('bcwh', [pc]) tc_ = torch.einsum('bcwh -> bc', [tc])\n    for vi in range(pc.shape[0]):\n        for vj in range(pc.shape[1]):\n            pc_[vi][vj] = torch.sum(pc[vi,vj,...])\n            tc_[vi][vj] = torch.sum(tc[vi,vj,:,:])\n            intersection_[vi][vj] = torch.sum(temp[vi, vj,:,:]) #intersection of pre mask and GT mask, per (batch, class)\n    w = 1 / ((pc_.type(torch.float32) + 1e-10) ** 2)\n    intersection = w * intersection_\n    union = w * (pc_ + tc_) # union of pre mask and GT mask\n\n    divided = 1-2 * (torch.sum(intersection, 1) + 1e-10) / (torch.sum(union, 1) + 1e-10)\n    #loss = divided.mean()\n    loss = torch.mean(divided)\n    return loss\n\n\ndef SurfaceLoss(probs, dis_map):\n    #assert utils.checkSimplex_(probs)\n    assert not utils.one_hot(dis_map) #if false throw exception e\n    idc = [1]\n    #pc = probs[:, idc, ...].type(torch.float32) #bcwh\n    pc = probs[:,1,:].type(torch.float32)\n    dc = dis_map[:, 1, ...] #bcwh\n\n    multiplied = torch.mul(pc, dc) #bcwh, equal to 'torch.einsum('bcwh, bcwh -> bcwh', [pc, dc])'\n    #loss = multiplied.mean()\n    loss = torch.mean(multiplied)\n    return loss","repo_name":"AlanMorningLight/GCN-Based-Interactive-Prostate-Segmentationon-on-MR-Images","sub_path":"GCN-Based-Interactive-Prostate-Segmentationon-on-MR-Images/code/Evaluation/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":8055,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"39090008960","text":"# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution(object):\n    def swapPairs(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        if not head or not head.next:\n            return head\n        temp = head.next\n        head.next = self.swapPairs(head.next.next)\n        temp.next = head\n        return temp\n    ","repo_name":"Shauro/LeetCode","sub_path":"OJ/List/24. 
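# --- poly_mathcing_loss() above aligns a predicted polygon with the ground truth by
# --- scoring every cyclic rotation of the GT vertex order; pidxall holds those
# --- rotations. A self-contained sketch of that index construction:
import numpy as np

pnum = 4
rotations = np.stack([(np.arange(pnum) + i) % pnum for i in range(pnum)])
print(rotations)
# [[0 1 2 3]
#  [1 2 3 0]
#  [2 3 0 1]
#  [3 0 1 2]]  -> each row indexes one cyclic ordering of the GT vertices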
Swap Nodes in Pairs/24_recursive.py","file_name":"24_recursive.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"18106736585","text":"#!E:/anaconda env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-07-21 14:07:48\n# @Author : 1nsane SUN\n# @Email : 771499532@qq.com\n# @Link : pass\n# @Version : $Id$\n\nimport wx\n\nclass CheckListBoxFrame(wx.Frame):\n\t\"\"\"docstring for CheckListBoxFrame\"\"\"\n\tdef __init__(self):\n\t\twx.Frame.__init__(self,None,-1,'Choice Example',\n\t\t\tsize=(250,200))\n\t\tpanel = wx.Panel(self,-1)\n\t\tsampleList = ['1','2','3','4','5']\n\t\twx.StaticText(panel,-1,'Select one:',(15,20))\n\t\twx.Choice(panel,-1,(85,18),choices=sampleList)\n\nif __name__ == '__main__':\n\tapp = wx.PySimpleApp()\n\tCheckListBoxFrame().Show()\n\tapp.MainLoop()\n","repo_name":"EriliSun/pyProject","sub_path":"CheckListBox.py","file_name":"CheckListBox.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5158262875","text":"import math\r\n\r\ndef cycleLength(n):\r\n    remainders = [1]\r\n    exponent = 1\r\n    r = 1\r\n    while True:\r\n        modresult = (r*(math.pow(10,exponent))) % n\r\n        if modresult == 0: return -1 #not a repeating decimal\r\n        if modresult == r*(10**exponent):\r\n            exponent += 1\r\n            remainders.append(modresult)\r\n        else:\r\n            already_seen = remainders.count(modresult)\r\n            if already_seen != 0:\r\n                remainder_location = remainders.index(modresult)\r\n                return len(remainders) - remainder_location\r\n            else:\r\n                remainders.append(modresult)\r\n                r = modresult\r\n                exponent = 1\r\n    \r\nmax_length = 0\r\nmax_d = 0\r\nfor i in range(2,1000):\r\n    cur_length = cycleLength(i)\r\n    if cur_length > max_length:\r\n        max_length = cur_length\r\n        max_d = i\r\nprint(max_d)","repo_name":"mirekkukla/project-euler","sub_path":"src/p26.py","file_name":"p26.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72353006025","text":"from extractLogs import extractLogs\nfrom dispatcher import dispatcher\nfrom controllerHelpers import *\nfrom auditLog import AuditLog\nfrom ballotImage import BallotImage\nfrom el68a import EL68A\n\ndef index():\n    form = FORM(\n        'Zipped File:', INPUT(_name='zipped_files', _type='file'),\n        INPUT(_type='submit'), _action='results')\n\n    return dict(message=None, form=form)\n\n# all the results\ndef results():\n    request.vars.zipped_files.file.seek(0)\n    el152, el155, el68a = extractLogs([request.vars.zipped_files.file])\n    del request.vars.zipped_files\n\n    # create parsed logs and delete files...\n    p_el152 = p_el155 = p_el68a = None\n    if el152:\n        p_el152 = AuditLog(el152)\n        del el152\n    if el155 and el68a:\n        p_el68a = EL68A(el68a)\n        del el68a\n        p_el155 = BallotImage(el155, p_el152, p_el68a)\n        del el155\n    elif el155 and not el68a:\n        p_el155 = BallotImage(el155, p_el152)\n        del el155\n    \n    # parsed logs are passed to dispatcher\n    dictionary = dispatcher(el152=p_el152, el155=p_el155, el68a=p_el68a)\n\n    if dictionary['message'] != 'LOLCAT':\n        generateImageIDs(dictionary['results'])\n        generateTags(dictionary['results'])\n    \n    return dictionary\n\ndef about():\n    return dict(message='')\n\ndef privacy():\n    return dict(message='')\n\ndef contact():\n    return dict(message='')\n\n# stream requested image to browser\ndef histogram():\n    imageID = request.args(0).split('.')[0]\n    data = 
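# --- p26.py above mixes float math.pow() with modular arithmetic, which loses
# --- precision once 10**exponent exceeds float resolution. An alternative
# --- integer-only sketch of the same repeating-decimal idea (not the repo's code):
def cycle_length(n):
    """Length of the repeating block of 1/n, via integer long division."""
    seen = {}
    r = 1 % n
    pos = 0
    while r != 0 and r not in seen:
        seen[r] = pos          # remember where this remainder first appeared
        r = (r * 10) % n       # next long-division remainder
        pos += 1
    return 0 if r == 0 else pos - seen[r]

assert cycle_length(7) == 6   # 1/7 = 0.(142857)
assert cycle_length(4) == 0   # 1/4 = 0.25 terminates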
session.vcImageMap[imageID]\n data.seek(0)\n return response.stream(data)\n \n# send requested image to browser (for download)\ndef histogram_download():\n imageID = request.args(0).split('.')[0]\n filename = 'graph_' + imageID + '.png'\n response.headers['Content-Disposition']='attachment; filename='+filename\n data = session.vcImageMap[imageID]\n data.seek(0)\n return response.stream(data)\n\n# setEmbedTags and populate session.vcImageMap['imageID'] -> ImageData\ndef generateTags(reports):\n session.vcImageMap = {}\n\n for report in reports:\n if report.hasImages():\n for image in report.getImagesList():\n session.vcImageMap[image.getImageID()] = image.getData()\n tag = A(\n IMG(_src=URL(r=request, f='histogram/' + image.getImageID() + '.png'),\n _alt=''+image.getImageID(), \n _width=640\n ),\n _href=URL(r=request, f='histogram_download/' + image.getImageID() + '.png')\n )\n image.setEmbedTags(tag)\n \n","repo_name":"davidwagner/audit-bear","sub_path":"web2py/applications/audit_bear/controllers/default2.py","file_name":"default2.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19457132553","text":"import os,sys\nimport shutil\n#print(os.listdir('./1'))\ndef copyFiles(sourcefile,targetDir):\n try:\n shutil.copyfile(sourcefile,targetDir)\n except IOError as e:\n print(\"Unable to copy file. %s\" % e)\n exit(1)\n except:\n print(\"Unexpected error:\", sys.exc_info())\n exit(1)\n\npath = './1/'\nlist = os.listdir(path)\nprint(list)\nos.rename(path + list[0],path + list[0]+'231')\ncopyFiles(path + list[0]+'231','./'+list[0]+'231')\n","repo_name":"donglinrui/Leetcode","sub_path":"python_test/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9088374220","text":"from typing import Iterable\nfrom typing import Optional\n\nfrom launch.actions import Shutdown\nfrom launch.frontend import expose_action\nfrom launch.some_substitutions_type import SomeSubstitutionsType\nfrom launch.substitutions import LaunchConfiguration\n\nfrom launch_ros.actions import Node\nfrom launch_ros.parameters_type import SomeParameters\nfrom launch_ros.remap_rule_type import SomeRemapRules\n\n\n@expose_action('catch2_integration_test_node')\nclass Catch2IntegrationTestNode(Node):\n \"\"\"\n A wrapper around launch_ros.actions.Node for integration test nodes.\n\n Passes the \"result_file\" argument to Catch2 and shuts down on exit.\n \"\"\"\n\n def __init__(\n self, *,\n executable: SomeSubstitutionsType,\n package: Optional[SomeSubstitutionsType] = None,\n name: Optional[SomeSubstitutionsType] = None,\n namespace: Optional[SomeSubstitutionsType] = None,\n exec_name: Optional[SomeSubstitutionsType] = None,\n parameters: Optional[SomeParameters] = None,\n remappings: Optional[SomeRemapRules] = None,\n ros_arguments: Optional[Iterable[SomeSubstitutionsType]] = None,\n arguments: Optional[Iterable[SomeSubstitutionsType]] = None,\n output: SomeSubstitutionsType = 'screen',\n **kwargs\n ) -> None:\n\n # Add arguments for Catch\n arguments_appended = [\n '--reporter',\n ['JUnit::out=', LaunchConfiguration('result_file')],\n '--reporter',\n 'console::out=-::colour-mode=ansi'\n ]\n\n if arguments:\n arguments_appended += arguments\n\n super().__init__(\n executable=executable,\n package=package,\n name=name,\n namespace=namespace,\n exec_name=exec_name,\n parameters=parameters,\n 
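# --- rename.py above renames the first directory entry, then copies it with a
# --- hand-rolled wrapper. Note os.listdir() order is arbitrary, so sorting first makes
# --- the choice deterministic. A compact sketch using shutil directly (the './1'
# --- directory and the '231' suffix are hypothetical, mirroring the script):
import os
import shutil

src_dir = './1'
first = sorted(os.listdir(src_dir))[0]           # sort: listdir order is not guaranteed
renamed = os.path.join(src_dir, first + '231')
os.rename(os.path.join(src_dir, first), renamed)
shutil.copy2(renamed, './' + first + '231')      # copy2 also preserves file metadata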
remappings=remappings,\n ros_arguments=ros_arguments,\n arguments=arguments_appended,\n on_exit=Shutdown(),\n output=output,\n **kwargs\n )\n","repo_name":"ngmor/catch_ros2","sub_path":"launch_catch_ros2/catch2_integration_test_node.py","file_name":"catch2_integration_test_node.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"73954489224","text":"import numpy as np\r\nimport networkx as nx\r\nfrom sklearn.cluster import KMeans\r\n\r\n\r\ndef normalize_dict(xy):\r\n X = np.array([val for val in xy.values()])\r\n norm_2 = np.sqrt((X ** 2).sum(axis=0))\r\n return {key: xy[key] / norm_2 for key in xy.keys()}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n NUM_CLUSTERS = 8\r\n NUM_OPINION_LEADERS = 10\r\n\r\n embedding = np.loadtxt(\"./embeddings/embeddings_tgn_UCI.txt\")\r\n kmeans = KMeans(n_clusters=NUM_CLUSTERS) # n_clusters:number of cluster\r\n kmeans.fit(embedding)\r\n\r\n labels = kmeans.labels_\r\n\r\n value_cnt = {} \r\n for label in labels: \r\n value_cnt[label] = value_cnt.get(label, 0) + 1\r\n print(value_cnt)\r\n\r\n G = nx.Graph()\r\n for i in range(len(embedding)):\r\n G.add_node(i + 1)\r\n\r\n scores = {}\r\n for i in range(len(embedding)):\r\n scores[i + 1] = 0\r\n\r\n start_time = 1082008561\r\n end_time = 1098744742 # 1085869012\r\n interval = 3600 * 24 * 14\r\n num_interval = int((end_time - start_time) / interval) + 1\r\n print('Divided into {} intervals'.format(num_interval))\r\n timestep = 0\r\n for time in range(start_time, end_time, interval):\r\n if time + interval > end_time:\r\n break\r\n G.remove_edges_from(G.edges)\r\n with open(\"./graphs/uc_irv_edges.txt\", 'r', encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n data = line.split(\" \")\r\n edge_time = int(data[3].strip('\\n'))\r\n if time <= edge_time < time + interval:\r\n G.add_edge(int(data[0]), int(data[1]))\r\n betweenness = normalize_dict(nx.betweenness_centrality(G, normalized=False))\r\n degree = normalize_dict(nx.degree_centrality(G))\r\n closeness = normalize_dict(nx.closeness_centrality(G))\r\n for score in scores:\r\n scores[score] += 1 * betweenness[score] * (1 - (timestep / num_interval) ** 2)\r\n scores[score] += 1 * degree[score] * (1 - (timestep / num_interval) ** 2)\r\n scores[score] += 1 * closeness[score] * (1 - (timestep / num_interval) ** 2)\r\n timestep += 1\r\n\r\n best_cluster = -1\r\n highest_score = -1\r\n for label in value_cnt:\r\n if value_cnt[label] > 600:\r\n continue\r\n else:\r\n cluster_score = 0\r\n for i in range(len(labels)):\r\n if labels[i] == label:\r\n cluster_score += scores[i + 1]\r\n print(\"Cluster:\", label, \", score=\", cluster_score / value_cnt[label])\r\n if cluster_score / value_cnt[label] > highest_score:\r\n highest_score = cluster_score / value_cnt[label]\r\n best_cluster = label\r\n print(\"Best cluster:\", best_cluster, \", score=\", highest_score)\r\n candidate_node = {}\r\n for i in range(len(labels)):\r\n if labels[i] == best_cluster:\r\n candidate_node[i + 1] = scores[i + 1]\r\n node_ranking = sorted(candidate_node.items(), key=lambda x: x[1], reverse=True)\r\n print(\"Ranking of candidate_node:\")\r\n final_rank = []\r\n for n in node_ranking:\r\n final_rank.append(n[0])\r\n 
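# --- A plausible launch-file sketch for the Catch2 wrapper above. The import path is
# --- an assumption based on the record's sub_path, and the package/executable names
# --- are hypothetical; only DeclareLaunchArgument/LaunchDescription are standard launch API.
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument

from launch_catch_ros2.catch2_integration_test_node import Catch2IntegrationTestNode

def generate_launch_description():
    return LaunchDescription([
        # read by the wrapper via LaunchConfiguration('result_file')
        DeclareLaunchArgument('result_file', default_value='/tmp/results.xml'),
        Catch2IntegrationTestNode(
            package='my_pkg',            # hypothetical package
            executable='my_pkg_test',    # hypothetical Catch2 test executable
        ),
    ])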
print(final_rank[0:NUM_OPINION_LEADERS])\r\n","repo_name":"YunmingHui/Leveraging-Graph-Embeddings-for-Dynamic-Opinion-Leader-Detection","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31793370417","text":"\"\"\"\nYou are given the heads of two sorted linked lists list1 and list2.\n\nMerge the two lists into one sorted list. The list should be made by splicing together the nodes of the first two lists.\n\nReturn the head of the merged linked list.\n\"\"\"\n# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\ndef mergeTwoLists(self, list1, list2):\n    #Create a head\n    head = ListNode()\n    #create a pointer\n    pointer = head\n\n    # Traverse both lists and compare values\n    # add the smaller value in the pointer\n    while list1 and list2:\n        if list1.val < list2.val:\n            pointer.next = list1\n            list1 = list1.next\n        else:\n            pointer.next = list2\n            list2 = list2.next\n        pointer = pointer.next\n\n    # When one list is finished, we do not need to iterate, just add the remaining list\n    # since it's already sorted\n    pointer.next = list1 or list2\n    \n    return head.next","repo_name":"abrvg/my_algo_notes","sub_path":"001-100/LC021_MergeTwoSortedLists.py","file_name":"LC021_MergeTwoSortedLists.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33833348148","text":"from yaml import safe_dump\nfrom uuid import UUID\nfrom mimetypes import guess_type\nfrom io import BytesIO\n\nfrom flask import Blueprint, Response, request, render_template, current_app, url_for\nfrom jinja2 import Markup\nfrom pygments.formatters import HtmlFormatter\nfrom pygments.lexers import get_all_lexers\n\nfrom pb.paste import model, handler as _handler\nfrom pb.util import highlight, redirect, request_content, id_url, rst, markdown, any_url\n\npaste = Blueprint('paste', __name__)\n\n@paste.app_template_filter(name='rst')\ndef filter_rst(source):\n    return Markup(rst(source))\n\n@paste.app_template_filter(name='markdown')\ndef filter_markdown(source):\n    return Markup(markdown(source))\n\n@paste.app_template_global()\ndef include_raw(filename):\n    env = current_app.jinja_env\n    source = current_app.jinja_loader.get_source(env, filename)[0]\n    return Markup(source)\n\n@paste.route('/')\ndef index():\n    return Response(render_template(\"index.html\"), mimetype='text/html')\n\n@paste.route('/f')\ndef form():\n    return Response(render_template(\"form.html\"), mimetype='text/html')\n\n@paste.route('/', methods=['POST'])\n@paste.route('/', methods=['POST'])\ndef post(vanity=None):\n    stream, filename = request_content()\n    if not stream:\n        return \"Nope.\\n\", 400\n\n    cur = model.get_digest(stream)\n    if not cur.count():\n        if vanity:\n            label, _ = vanity\n            paste = model.insert(stream, label=label)\n        elif request.form.get('p'):\n            paste = model.insert(stream, private=1)\n        else:\n            paste = model.insert(stream)\n        uuid = str(UUID(hex=paste['_id']))\n    else:\n        paste = cur.__next__()\n        uuid = ''\n\n    url = any_url(paste, filename=filename)\n    gs = lambda l: current_app.url_map.converters['sid'].to_url(None, paste['digest'], l)\n\n    body = {\n        'url': url,\n        'long': gs(42),\n        'short': gs(6),\n        'uuid': uuid,\n        'sha1': paste['digest']\n    }\n\n    return redirect(url, safe_dump(body, default_flow_style=False))\n\n@paste.route('/', methods=['PUT'])\ndef 
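# --- A quick check of the merge logic above. It assumes ListNode and mergeTwoLists
# --- are in scope; note mergeTwoLists is written method-style (it takes self), so we
# --- pass None for that slot here.
def from_list(vals):
    head = cur = ListNode()
    for v in vals:
        cur.next = ListNode(v)
        cur = cur.next
    return head.next

merged = mergeTwoLists(None, from_list([1, 2, 4]), from_list([1, 3, 4]))
out = []
while merged:
    out.append(merged.val)
    merged = merged.next
assert out == [1, 1, 2, 3, 4, 4]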
put(uuid):\n stream, filename = request_content()\n if not stream:\n return \"Nope.\\n\", 400\n\n cur = model.get_digest(stream)\n if cur.count():\n url = any_url(cur.__next__())\n return redirect(url, \"Paste already exists.\\n\", 409)\n\n result = model.put(uuid, stream)\n if result['n']:\n # FIXME: need to invalidate cache\n return \"{} pastes updated.\\n\".format(result['n']), 200\n\n return \"Not found.\\n\", 404\n\n@paste.route('/', methods=['DELETE'])\ndef delete(uuid):\n result = model.delete(uuid)\n if result['n']:\n # FIXME: need to invalidate cache\n return \"{} pastes deleted.\\n\".format(result['n']), 200\n return \"Not found.\\n\", 404\n\n@paste.route('/')\n@paste.route('//')\n@paste.route('//')\n@paste.route('/')\n@paste.route('//')\n@paste.route('//')\n@paste.route('/')\n@paste.route('//')\n@paste.route('//')\n@paste.route('/')\n@paste.route('//')\n@paste.route('//')\ndef get(sid=None, sha1=None, label=None, lexer=None, handler=None):\n cur = None\n if sid:\n sid, name, value = sid\n cur = model.get_content(**{\n '$or' : [\n {\n 'digest': {\n '$regex': '{}$'.format(sid)\n }\n },\n {\n 'label' : value\n }\n ],\n 'private': {\n '$exists': False\n }\n })\n if sha1:\n digest, name = sha1[:2]\n cur = model.get_content(digest = digest).hint([('digest', 1)])\n if label:\n label, name = label\n cur = model.get_content(label = label).hint([('label', 1)])\n\n if not cur or not cur.count():\n return \"Not found.\\n\", 404\n\n paste = cur.__next__()\n content = model._get(paste.get('content'))\n\n if paste.get('redirect'):\n content = content.decode('utf-8')\n return redirect(content, '{}\\n'.format(content))\n\n mimetype, _ = guess_type(name)\n\n if lexer != None:\n return highlight(content, lexer)\n if handler != None:\n return _handler.get(handler, content, mimetype)\n if mimetype:\n return Response(content, mimetype=mimetype)\n\n return content\n\n@paste.route('/u', methods=['POST'])\ndef url():\n stream, _ = request_content()\n if not stream:\n return \"Nope.\\n\", 400\n\n stream = BytesIO(stream.read().decode('utf-8').split('\\n')[0].encode('utf-8'))\n\n cur = model.get_digest(stream)\n if not cur.count():\n url = model.insert(stream, redirect=1)\n else:\n url = cur.__next__()\n\n url = id_url(sid=url['digest'])\n return redirect(url, \"{}\\n\".format(url), 200)\n\n@paste.route('/s')\ndef stats():\n cur = model.get_stats()\n return safe_dump(dict(pastes=cur.count()), default_flow_style=False)\n\n@paste.route('/static/highlight.css')\ndef highlight_css():\n css = HtmlFormatter().get_style_defs('.code')\n return Response(css, mimetype='text/css')\n\n@paste.route('/l')\ndef list_lexers():\n lexers = '\\n'.join(' '.join(i) for _, i, _, _ in get_all_lexers())\n return '{}\\n'.format(lexers)\n","repo_name":"Xecantur/pb","sub_path":"pb/paste/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"75148571145","text":"import cv2\nimport numpy as np\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nimport os\n\nfrom tqdm import tqdm\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom glob import glob\nimport json\nfrom tqdm import tqdm\nimport shutil\n\n\ndef convert_bbox_coco2yolo(img_width, img_height, bbox):\n \"\"\"\n Convert bounding box from COCO format to YOLO format\n\n Parameters\n ----------\n img_width : int\n width of image\n img_height : int\n height of image\n bbox : list[int]\n bounding box annotation in COCO format: \n 
[top left x position, top left y position, width, height]\n\n Returns\n -------\n list[float]\n bounding box annotation in YOLO format: \n [x_center_rel, y_center_rel, width_rel, height_rel]\n \"\"\"\n \n # YOLO bounding box format: [x_center, y_center, width, height]\n # (float values relative to width and height of image)\n x_tl, y_tl, w, h = bbox\n\n dw = 1.0 / img_width\n dh = 1.0 / img_height\n\n x_center = x_tl + w / 2.0\n y_center = y_tl + h / 2.0\n\n x = x_center * dw\n y = y_center * dh\n w = w * dw\n h = h * dh\n\n return [x, y, w, h]\n\ndef make_folders(path=\"output\"):\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n return path\n\n\ndef convert_coco_json_to_yolo_txt(output_path, json_file):\n\n path = make_folders(output_path)\n\n with open(json_file, encoding='utf-8') as f:\n json_data = json.load(f)\n\n # write _darknet.labels, which holds names of all classes (one class per line)\n label_file = os.path.join(output_path, \"data.names\")\n with open(label_file, \"w\", encoding='utf-8') as f:\n for category in tqdm(json_data[\"categories\"], desc=\"Categories\"):\n category_name = category[\"name\"]\n f.write(f\"{category_name}\\n\")\n\n for image in tqdm(json_data[\"images\"], desc=\"Annotation txt for each iamge\"):\n img_id = image[\"id\"]\n img_name = image[\"file_name\"]\n img_width = image[\"width\"]\n img_height = image[\"height\"]\n\n anno_in_image = [anno for anno in json_data[\"annotations\"] if anno[\"image_id\"] == img_id]\n anno_txt = os.path.join(output_path, img_name.split(\".\")[0] + \".txt\")\n with open(anno_txt, \"w\", encoding='utf-8') as f:\n for anno in anno_in_image:\n category = anno[\"category_id\"] - 1\n bbox_COCO = anno[\"bbox\"]\n x, y, w, h = convert_bbox_coco2yolo(img_width, img_height, bbox_COCO)\n f.write(f\"{category} {x:.6f} {y:.6f} {w:.6f} {h:.6f}\\n\")\n\n print(\"Converting COCO Json to YOLO txt finished!\")\n\n\ndef check_class_balance(class_target_list):\n cate = [0 for _ in range(14)]\n\n for train_labels in tqdm(class_target_list): # 1 txt file\n with open(train_labels, 'r', encoding='utf-8') as f:\n label = f.readlines() # 1 txt file labels\n for lab in label: # 1 line, in 1 txt file\n lab = lab.split(' ')\n cls = int(lab[0])\n\n if cls == 0:\n cate[0] += 1\n elif cls == 1:\n cate[1] += 1\n elif cls == 2:\n cate[2] += 1\n elif cls == 3:\n cate[3] += 1\n elif cls == 4:\n cate[4] += 1\n elif cls == 5:\n cate[5] += 1\n elif cls == 6:\n cate[6] += 1\n elif cls == 7:\n cate[7] += 1\n elif cls == 8:\n cate[8] += 1\n elif cls == 9:\n cate[9] += 1\n elif cls == 10:\n cate[10] += 1\n elif cls == 11:\n cate[11] += 1\n elif cls == 12:\n cate[12] += 1\n elif cls == 13:\n cate[13] += 1 \n\n result_cate = cate\n print(result_cate)\n\n X = np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13])\n Y = np.array(result_cate)\n\n # ax = sns.barplot(X,Y, order=X)\n # for p, q in zip(ax.patches, Y):\n # ax.text(p.get_x()+p.get_width()/2.,\n # p.get_height()*(1.01),\n # \"{}\".format(q),\n # ha = 'center' )\n\n # plt.show()\n \n \ndef yolobbox2bbox(x, y, w, h, w_t, h_t):\n x1, y1 = (x - (w / 2.0)) * w_t, (y - (h / 2.0)) * h_t\n x2, y2 = (x + (w / 2.0)) * w_t, (y + (h / 2.0)) * h_t\n x1 = round(x1, 1)\n y1 = round(y1, 1)\n x2 = round(x2, 1)\n y2 = round(y2, 1)\n \n return (x1, y1, x2, y2)\n\n\ndef bbox2yolobbox(x1, y1, x2, y2, w_t, h_t):\n x = round((x2 + x1) / (2 * w_t), 6)\n y = round((y2 + y1) / (2 * h_t), 6)\n w = round((x2 - x1) / w_t, 6)\n h = round((y2 - y1) / h_t, 6)\n \n return (x, y, w, h)\n\n\ndef find_class_images(target_class, 
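# --- A worked instance of the convert_bbox_coco2yolo() arithmetic above, on a
# --- hypothetical 1920x1080 frame (matching the sizes hard-coded later in the file).
# --- Assumes the function is in scope.
# COCO bbox [x_tl, y_tl, w, h] = [960, 540, 192, 108]
# x_center = 960 + 192/2 = 1056 -> 1056/1920 = 0.55
# y_center = 540 + 108/2 = 594  -> 594/1080  = 0.55
# width    = 192/1920 = 0.10;  height = 108/1080 = 0.10
yolo = convert_bbox_coco2yolo(1920, 1080, [960, 540, 192, 108])
assert [round(v, 6) for v in yolo] == [0.55, 0.55, 0.1, 0.1]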
all_train_label_list):\n target_class = target_class\n num_class_target = 0\n class_target_list = []\n\n for train_labels in tqdm(all_train_label_list): # 1 txt file\n with open(train_labels, 'r', encoding='utf-8') as f:\n label = f.readlines() # 1 txt file labels\n for lab in label: # 1 line, in 1 txt file\n lab = lab.split(' ')\n cls, x, y, w, h = int(lab[0]), float(lab[1]), float(lab[2]), float(lab[3]), float(lab[4])\n\n if cls == target_class:\n num_class_target += 1\n\n class_target_list.append(train_labels)\n\n print(\"Class Num:\", num_class_target)\n print(\"Image Num:\", len(set(class_target_list)))\n \n return class_target_list\n\n\n# Img Aug Function\ndef aug_images(img_path, txt_path, output_img_path, output_txt_path):\n img_array = np.fromfile(img_path, np.uint8)\n img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)\n input_img = img[np.newaxis, :, :, :]\n\n txts = np.loadtxt(txt_path, dtype = str, delimiter = ' ').astype(float)\n if txts.ndim == 1:\n txts = txts[np.newaxis, :]\n \n labels = []\n for i, txt in enumerate(txts):\n x1, y1, x2, y2 = yolobbox2bbox(txt[1], txt[2], txt[3], txt[4], 1920, 1080)\n input_label = [int(txt[0]), x1, y1, x2, y2]\n labels.append(input_label)\n \n bbox = []\n for label in labels:\n bbox.append(ia.BoundingBox(x1 = label[1], y1 = label[2], x2 = label[3], y2 = label[4], label = label[0]))\n\n seq = iaa.Sequential([\n iaa.Affine(\n scale={\"x\": (0.5, 0.7), \"y\": (0.5, 0.7)},\n rotate = (-15, 15)\n ),\n iaa.AdditiveGaussianNoise(scale = (0.05*255, 0.10*255)),\n iaa.GaussianBlur((0, 1.0)),\n iaa.PerspectiveTransform(scale=(0.01, 0.02))\n ])\n\n output_img, output_bbox = seq(images = input_img, bounding_boxes = bbox)\n output_img = np.squeeze(output_img, axis=0)\n\n result, encoded_img = cv2.imencode('.png', output_img)\n \n if result:\n with open(output_img_path, mode='w+b') as f:\n encoded_img.tofile(f)\n\n with open(output_txt_path, 'w', encoding='utf-8') as f:\n for bbox in output_bbox:\n x, y, w, h = bbox2yolobbox(bbox.x1, bbox.y1, bbox.x2, bbox.y2, 1920, 1080)\n\n line = str(bbox.label) + ' ' + str(x) + ' ' + str(y) + ' ' + str(w) + ' ' + str(h) + '\\n'\n f.write(line)\n \n\nif __name__ == \"__main__\":\n print(\"Folder Copy Started!\")\n shutil.copytree(\"../../DATA/train\", \"../FINAL_DATA/train\") # Directory Copy\n print(\"Folder Copy Finished!\")\n \n if not os.path.isdir('../FINAL_DATA/train/labels'):\n os.mkdir('../FINAL_DATA/train/labels')\n \n convert_coco_json_to_yolo_txt(\"../FINAL_DATA/train/labels\", \"../FINAL_DATA/train/label/Train.json\") # Convert Json to Txt\n \n print(\"Augmentation Started!\")\n \n train_label_list = glob(\"../FINAL_DATA/train/labels/*.txt\")\n\n class12_target_list = find_class_images(12, train_label_list)\n class10_target_list = find_class_images(10, train_label_list)\n class08_target_list = find_class_images(8, train_label_list)\n class03_target_list = find_class_images(3, train_label_list)\n class06_target_list = find_class_images(6, train_label_list)\n \n # Class 12(x 50), 10(x 10), 8(x 10), 3(x 5), 6(x 2), 2(x 2)\n class12_target_set = set(class12_target_list)\n class10_target_set = set(class10_target_list)\n class08_target_set = set(class08_target_list)\n class03_target_set = set(class03_target_list)\n class06_target_set = set(class06_target_list)\n \n # Seed Setting\n ia.seed(42)\n \n for i in tqdm(range(50)):\n for path in class12_target_set:\n txt_path = path\n img_path = path.replace(\"labels\", \"images\").replace(\"txt\", \"png\")\n\n aug_images(img_path, txt_path, img_path[:-4] + \"_aug\" + 
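# --- A round-trip sanity check for the two box converters above (assumes both are in
# --- scope): yolobbox2bbox() and bbox2yolobbox() invert each other up to their rounding.
x1, y1, x2, y2 = yolobbox2bbox(0.55, 0.55, 0.1, 0.1, 1920, 1080)
assert (x1, y1, x2, y2) == (960.0, 540.0, 1152.0, 648.0)
assert bbox2yolobbox(x1, y1, x2, y2, 1920, 1080) == (0.55, 0.55, 0.1, 0.1)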
str(i) + \".png\", txt_path[:-4] + \"_aug\" + str(i) + \".txt\")\n\n for i in tqdm(range(10)):\n for path in class10_target_set:\n txt_path = path\n img_path = path.replace(\"labels\", \"images\").replace(\"txt\", \"png\")\n\n aug_images(img_path, txt_path, img_path[:-4] + \"_aug\" + str(i) + \".png\", txt_path[:-4] + \"_aug\" + str(i) + \".txt\")\n\n for i in tqdm(range(10)):\n for path in class08_target_set:\n txt_path = path\n img_path = path.replace(\"labels\", \"images\").replace(\"txt\", \"png\")\n\n aug_images(img_path, txt_path, img_path[:-4] + \"_aug\" + str(i) + \".png\", txt_path[:-4] + \"_aug\" + str(i) + \".txt\")\n\n for i in tqdm(range(5)):\n for path in class03_target_set:\n txt_path = path\n img_path = path.replace(\"labels\", \"images\").replace(\"txt\", \"png\")\n\n aug_images(img_path, txt_path, img_path[:-4] + \"_aug\" + str(i) + \".png\", txt_path[:-4] + \"_aug\" + str(i) + \".txt\")\n\n for i in tqdm(range(2)):\n for path in class06_target_set:\n txt_path = path\n img_path = path.replace(\"labels\", \"images\").replace(\"txt\", \"png\")\n\n aug_images(img_path, txt_path, img_path[:-4] + \"_aug\" + str(i) + \".png\", txt_path[:-4] + \"_aug\" + str(i) + \".txt\")\n \n all_train_label_list = glob(\"../FINAL_DATA/train/labels/*.txt\")\n check_class_balance(all_train_label_list)\n print(\"Total Image Number: \", len(all_train_label_list)) \n print(\"Augmentation Finished!\")","repo_name":"Ikgyu-Lee/2022-ai-online-competition","sub_path":"yolov5/augmentation.py","file_name":"augmentation.py","file_ext":"py","file_size_in_byte":10239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73114838985","text":"#\n# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)\n#\n# Created with the Rule Development Kit: https://github.com/awslabs/aws-config-rdk\n# Can be used stand-alone or with the Rule Compliance Engine: https://github.com/awslabs/aws-config-engine-for-compliance-as-code\n#\nimport sys\nimport unittest\ntry:\n from unittest.mock import MagicMock, patch, ANY\nexcept ImportError:\n import mock\n from mock import MagicMock, patch, ANY\nimport botocore\nfrom botocore.exceptions import ClientError\nimport json\n\n##############\n# Parameters #\n##############\n\n# Define the default resource to report to Config Rules\nDEFAULT_RESOURCE_TYPE = 'AWS::EC2::Instance'\n\n#############\n# Main Code #\n#############\n\nconfig_client_mock = MagicMock()\nsts_client_mock = MagicMock()\nec2_client_mock = MagicMock()\n\nclass Boto3Mock():\n def client(self, client_name, *args, **kwargs):\n if client_name == 'config':\n return config_client_mock\n elif client_name == 'sts':\n return sts_client_mock\n elif client_name == 'ec2':\n return ec2_client_mock\n else:\n raise Exception(\"Attempting to create an unknown client\")\n\nsys.modules['boto3'] = Boto3Mock()\n\nrule = __import__('EC2_TAG_MATCHES_INSTANCE_PROFILE_NAME')\n\nclass SampleTest(unittest.TestCase):\n\n # Scenario 1 : EC2 instance has no tag with key equal to TAG_KEY.\n def test_rule_scenario1(self):\n invoking_event = build_invoking_event(\"\",None)\n response = rule.lambda_handler(build_lambda_event(ruleParameters='{}', invoking_event=invoking_event), \"\")\n expected_response = []\n expected_response.append(build_expected_response(\"NOT_APPLICABLE\", \"some-resource-id\"))\n assert_successful_evaluation(self, response, expected_response)\n \n # Scenario 2 : EC2 instance does not have TAG_VALUE_MUST_INCLUDE in 
tag value and has no instance profile.\n def test_rule_scenario2(self):\n application_role = {\"application_role\": \"blah\"}\n invoking_event = build_invoking_event(application_role,None)\n response = rule.lambda_handler(build_lambda_event(ruleParameters='{}', invoking_event=invoking_event), \"\")\n expected_response = []\n expected_response.append(build_expected_response(\"NOT_APPLICABLE\", \"some-resource-id\"))\n assert_successful_evaluation(self, response, expected_response)\n \n # Scenario 3 : EC2 instance does not have TAG_VALUE_MUST_INCLUDE in tag value and instance profile does not have NAME_ROLE_MUST_INCLUDE in IAM instance profile\n def test_rule_scenario3(self):\n application_role = {\"application_role\": \"blah\"}\n instance_profile = {\"arn\": \"arn:aws:iam::123456789012:instance-profile/aws-poc.np.non-db.ec2role.iaminstancerole\"}\n invoking_event = build_invoking_event(application_role, instance_profile)\n response = rule.lambda_handler(build_lambda_event(ruleParameters='{}', invoking_event=invoking_event), \"\")\n expected_response = []\n expected_response.append(build_expected_response(\"NOT_APPLICABLE\", \"some-resource-id\"))\n assert_successful_evaluation(self, response, expected_response)\n \n # Scenario 4 : EC2 instance does not have TAG_VALUE_MUST_INCLUDE in tag value but has NAME_ROLE_MUST_INCLUDE in IAM instance profile \n def test_rule_scenario4(self):\n application_role = {\"application_role\": \"blah\"}\n instance_profile = {\"arn\": \"arn:aws:iam::123456789012:instance-profile/aws-poc.np.db.ec2role.iaminstancerole\"}\n invoking_event = build_invoking_event(application_role, instance_profile)\n response = rule.lambda_handler(build_lambda_event(ruleParameters='{}', invoking_event=invoking_event), \"\")\n expected_response = []\n expected_response.append(build_expected_response(\"NON_COMPLIANT\", \"some-resource-id\", annotation=\"Tag value for 'application_role' doesn't have 'DB' but IAM Instance Profile has '.db.'\"))\n assert_successful_evaluation(self, response, expected_response)\n \n # Scenario 5 : EC2 instance has TAG_VALUE_MUST_INCLUDE in tag value but does not have an IAM instance profile\n def test_rule_scenario5(self):\n application_role = {\"application_role\": \"DB/APP\"}\n instance_profile = None\n invoking_event = build_invoking_event(application_role, instance_profile)\n response = rule.lambda_handler(build_lambda_event(ruleParameters='{}', invoking_event=invoking_event), \"\")\n expected_response = []\n expected_response.append(build_expected_response(\"NON_COMPLIANT\", \"some-resource-id\", annotation=\"Tag value for 'application_role' has 'DB' but there is no IAM instance profile for the resource\"))\n assert_successful_evaluation(self, response, expected_response)\n \n # Scenario 6 : EC2 instance has TAG_VALUE_MUST_INCLUDE in tag value but does not have NAME_ROLE_MUST_INCLUDE in IAM instance profile\n def test_rule_scenario6(self):\n application_role = {\"application_role\": \"DB/APP\"}\n instance_profile = {\"arn\": \"arn:aws:iam::123456789012:instance-profile/aws-poc.np.non-db.ec2role.iaminstancerole\"}\n invoking_event = build_invoking_event(application_role, instance_profile) \n response = rule.lambda_handler(build_lambda_event(ruleParameters='{}', invoking_event=invoking_event), \"\")\n expected_response = []\n expected_response.append(build_expected_response(\"NON_COMPLIANT\", \"some-resource-id\", annotation=\"Tag value for 'application_role' has 'DB' but IAM Instance Profile doesn't have '.db.'\"))\n assert_successful_evaluation(self, 
response, expected_response)\n \n # Scenario 7 : EC2 instance has TAG_VALUE_MUST_INCLUDE in tag value and NAME_ROLE_MUST_INCLUDE in IAM instance profile\n def test_rule_scenario7(self):\n application_role = {\"application_role\": \"DB/APP\"}\n instance_profile = {\"arn\": \"arn:aws:iam::123456789012:instance-profile/aws-poc.np.db.ec2role.iaminstancerole\"}\n invoking_event = build_invoking_event(application_role, instance_profile)\n response = rule.lambda_handler(build_lambda_event(ruleParameters='{}', invoking_event=invoking_event), \"\")\n expected_response = []\n expected_response.append(build_expected_response(\"COMPLIANT\", \"some-resource-id\"))\n assert_successful_evaluation(self, response, expected_response)\n\n####################\n# Helper Functions #\n####################\n\ndef build_lambda_event(ruleParameters, invoking_event):\n return {\n 'executionRoleArn': 'roleArn',\n 'eventLeftScope': False,\n 'invokingEvent': invoking_event,\n 'ruleParameters': ruleParameters,\n 'accountId': 'account-id',\n 'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',\n 'resultToken': 'token',\n\n }\n\ndef build_invoking_event(tags, iamInstanceProfileARN):\n invoking_event = {\n \"messageType\":\"ConfigurationItemChangeNotification\",\n \"configurationItem\":{\n \"resourceType\":\"AWS::EC2::Instance\",\n \"resourceId\": \"some-resource-id\",\n \"configurationItemStatus\": \"OK\",\n \"configurationItemCaptureTime\": \"anytime\",\n \"tags\": tags,\n \"configuration\": { \"iamInstanceProfile\": iamInstanceProfileARN }\n }\n }\n return json.dumps(invoking_event)\n\ndef build_lambda_configurationchange_event(invoking_event, rule_parameters=None):\n event_to_return = {\n 'configRuleName':'myrule',\n 'executionRoleArn':'roleArn',\n 'eventLeftScope': True,\n 'invokingEvent': invoking_event,\n 'accountId': '123456789012',\n 'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',\n 'resultToken':'token'\n }\n if rule_parameters:\n event_to_return['ruleParameters'] = rule_parameters\n return event_to_return\n\ndef build_lambda_scheduled_event(rule_parameters=None):\n invoking_event = '{\"messageType\":\"ScheduledNotification\",\"notificationCreationTime\":\"2017-12-23T22:11:18.158Z\"}'\n event_to_return = {\n 'configRuleName':'myrule',\n 'executionRoleArn':'roleArn',\n 'eventLeftScope': True,\n 'invokingEvent': invoking_event,\n 'accountId': '123456789012',\n 'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',\n 'resultToken':'token'\n }\n if rule_parameters:\n event_to_return['ruleParameters'] = rule_parameters\n return event_to_return\n\ndef build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):\n if not annotation:\n return {\n 'ComplianceType': compliance_type,\n 'ComplianceResourceId': compliance_resource_id,\n 'ComplianceResourceType': compliance_resource_type\n }\n return {\n 'ComplianceType': compliance_type,\n 'ComplianceResourceId': compliance_resource_id,\n 'ComplianceResourceType': compliance_resource_type,\n 'Annotation': annotation\n }\n\ndef assert_successful_evaluation(testClass, response, resp_expected, evaluations_count=1):\n if isinstance(response, dict):\n testClass.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])\n testClass.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])\n testClass.assertEquals(resp_expected['ComplianceResourceId'], 
response['ComplianceResourceId'])\n        testClass.assertTrue(response['OrderingTimestamp'])\n        if 'Annotation' in resp_expected or 'Annotation' in response:\n            testClass.assertEquals(resp_expected['Annotation'], response['Annotation'])\n    elif isinstance(response, list):\n        testClass.assertEquals(evaluations_count, len(response))\n        for i, response_expected in enumerate(resp_expected):\n            testClass.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])\n            testClass.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])\n            testClass.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])\n            testClass.assertTrue(response[i]['OrderingTimestamp'])\n            if 'Annotation' in response_expected or 'Annotation' in response[i]:\n                testClass.assertEquals(response_expected['Annotation'], response[i]['Annotation'])","repo_name":"awslabs/aws-config-rules","sub_path":"python/EC2_TAG_MATCHES_INSTANCE_PROFILE_NAME/EC2_TAG_MATCHES_INSTANCE_PROFILE_NAME_test.py","file_name":"EC2_TAG_MATCHES_INSTANCE_PROFILE_NAME_test.py","file_ext":"py","file_size_in_byte":10514,"program_lang":"python","lang":"en","doc_type":"code","stars":1509,"dataset":"github-code","pt":"81"} +{"seq_id":"37247748915","text":"# We can count the gaps only. But how?\n# Hashmap!\n# key: position, value: count of the gaps\n# rows - the position with max gaps\nfrom typing import List\n\n\nclass Solution:\n\n    def leastBricks(self, wall: List[List[int]]) -> int:\n        countGap = {0: 0}\n        # mapping position: count of brick gaps\n        # seed with {0: 0} to make sure the dict is never empty\n\n        for row in wall:\n            total = 0\n            for brick in row[:-1]:\n                # 3/10: must not include the rightmost gap (the wall's edge)\n                total += brick\n                countGap[total] = 1 + countGap.get(total, 0)\n\n        return len(wall) - max(countGap.values())","repo_name":"YiruDing/LeetcodePractice","sub_path":"Neetcode305/Array,String,&Hashing/554_Brick_Wall.py","file_name":"554_Brick_Wall.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8997019305","text":"basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']\r\nfruits = set(basket)\r\nfruits\r\n'Maine' in atlantic_states\r\nTrue\r\n'Oregon' in atlantic_states\r\na - b\r\na.difference(b)\r\nroundthings = ['orange', 'gumball', 'egg', 'tire']\r\npinkthings = ['lipstick', 'gumball', 'baby blanket']\r\na = set(roundthings)\r\nq = set(pinkthings)\r\na - q","repo_name":"jossrodes/PYTHONFORDUMMIES","sub_path":"096_what_sets_are.py","file_name":"096_what_sets_are.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11901544116","text":"import os\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom PIL import Image\n\n# Define the model hyperparameters\n# learning rate\nlearning_rate = 0.001\n# number of epochs\nnum_epochs = 120\n# batch size\nbatch_size = 128\n\n\n# Load a dataset\ndef load_dataset(directory):\n    images = []\n    labels = []\n    for filename in os.listdir(directory):\n        # Parse the filename to get the label\n        # (the digit just before the file extension)\n        label = int(filename[-5])\n        filepath = os.path.join(directory, filename)\n        image = tf.io.read_file(filepath)\n        # decode the image\n        image = tf.image.decode_png(image)\n        image = tf.image.resize(image, (128, 128))\n        images.append(image)\n        labels.append(label)\n    return np.array(images), np.array(labels)\n\n\n# Load the datasets\n# Training set\ntrain_images, train_labels = load_dataset('D:\\\\DATA1\\\\MRIi\\\\train')\n# Validation set\nval_images, val_labels = 
load_dataset('D:\\\\DATA1\\\\MRIi\\\\val')\n# Test set\ntest_images, test_labels = load_dataset('D:\\\\DATA1\\\\MRIi\\\\test')\n\n# Convert the datasets to TensorFlow Dataset objects\ntrain_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels))\nval_dataset = tf.data.Dataset.from_tensor_slices((val_images, val_labels))\ntest_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels))\n\n# Create an ImageDataGenerator and set the augmentation parameters\ndatagen = ImageDataGenerator(\n    rotation_range=20, # random rotation range of up to 20°\n    width_shift_range=0.1, # random horizontal shift of up to 10%\n    height_shift_range=(0, 0.1), # random vertical shift of up to 10% upwards\n    shear_range=0.2, # random shear intensity\n    zoom_range=(0.8, 1.05), # random zoom range\n    horizontal_flip=True, # horizontal flip\n    vertical_flip=False # no vertical flip\n)\n\ntrain_images_positive = train_images[train_labels == 1]\ntrain_images_positive = train_images_positive.reshape((-1, 128, 128, 3))\n\n# Set the generation batch size and the number of samples to generate\ndesired_samples = 1000 # number of augmented samples to generate\n\n# Create a directory for saving the augmented samples\nsave_dir = 'D:/DATA1/AugmentedSamples'\nos.makedirs(save_dir, exist_ok=True)\n\n# Generate the augmented samples and save them\ncounter = 0\n\nfor batch_images in datagen.flow(train_images_positive, batch_size=32):\n    for image in batch_images:\n        filename = f'aug_{counter}.png'\n        save_path = os.path.join(save_dir, filename)\n        tf.keras.preprocessing.image.save_img(save_path, image)\n\n        counter += 1\n        if counter >= desired_samples:\n            break\n    if counter >= desired_samples:\n        break\n","repo_name":"hecang01/LDHmri","sub_path":"AugmentedSamples.py","file_name":"AugmentedSamples.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25242798088","text":"\"\"\"\nDescription\nGiven a singly linked list, return the list after deleting its k-th node from the end.\nInput description:\nn is the length of the list.\nval are the values of the list nodes.\nOutput description:\nReturn the head pointer of the list from the given function.\nExample 1\nInput:\n5 4\n1 2 3 4 5\nOutput:\n1 3 4 5\n\"\"\"\n\n\nclass LinkList:\n    def __init__(self, v) -> None:\n        self.value = v\n        self.next = None\n\n\nclass Solution:\n    def deletereknode(self, k, l):\n        p1 = l\n        p2 = l\n        for _ in range(k):\n            p1 = p1.next\n        while p1.next:\n            p1 = p1.next\n            p2 = p2.next\n        p2.next = p2.next.next\n        return l\n\n\nif __name__ == '__main__':\n    n, k = input().split()\n    vals = [int(item) for item in input().split()]\n    l = LinkList(0)\n    p = l\n    for val in vals:\n        node = LinkList(val)\n        p.next = node\n        p = node\n    test = Solution()\n    res = test.deletereknode(int(k), l.next)\n    while res:\n        print(res.value, end=\" \")\n        res = res.next\n","repo_name":"LeungLoh/algorithm","sub_path":"程序员代码面试指南/CD49 在链表中删除倒数第K个节点.py","file_name":"CD49 在链表中删除倒数第K个节点.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27199912972","text":"import logging\nfrom os import path\nfrom openpyxl import load_workbook\nfrom selenium import webdriver\nfrom scraper.account import Account\nfrom scraper.constants import Constants as constants\nfrom scraper.person import Person\nfrom scraper.search import Search\n\n# Group profiles scraper\nchrome_path = path.abspath(\"chrome_driver/chromedriver\")\ndriver = webdriver.Chrome(chrome_path)\naccount = Account(driver)\naccount.login(constants.email, constants.password)\n# Get search parameters\nwb = load_workbook('search_results.xlsx')\nws_search = wb[\"Search_parameters\"]\npage_limit = ws_search['D2'].value\nresults_limit = ws_search['E2'].value\nmy_search = Search(\n    driver, ws_search['A2'].value, ws_search['B2'].value, ws_search['C2'].value, pages_limit=page_limit)\n# Start search\nmy_search.search()\nurls = my_search.results_url_array\n# Get persons 
data\nresult_persons = []\nfor url in urls:\n    try:\n        my_person = Person(driver, person_url=url)\n        result_persons.append(my_person)\n        # stop once the results limit is reached\n        if len(result_persons) >= results_limit:\n            break\n    except Exception:\n        logging.error(\"Person data error url: \" + url)\n# Save data to excel\n# Create a new sheet\nws = wb.create_sheet(title=\"LinkedIn data\")\n# excel titles\nws.append([\"name\", \"job title\", \"location\", \"connections\", \"about\",\n           \"experiences\", \"educations\", \"accomplishments\", \"profile url\"])\nfor person in result_persons:\n    data = []\n    data.append(person.name)\n    data.append(person.job_title)\n    data.append(person.location)\n    data.append(person.connections)\n    data.append(\"{about}\".format(about=person.about))\n    data.append(\"{experiences}\".format(experiences=person.experiences))\n    data.append(\"{educations}\".format(educations=person.educations))\n    data.append(\"{accomplishment}\".format(\n        accomplishment=person.accomplishments))\n    data.append(\"{url}\".format(url=person.person_url))\n    ws.append(data)\n# Save data\nwb.save('search_results.xlsx')\n","repo_name":"enikakis/scrapper","sub_path":"group_profiles_scraper.py","file_name":"group_profiles_scraper.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"655821492","text":"\"\"\"\nProgram: coverages.py\nAuthor: Noah Allan Ertz\nLast Date Modified: 2021-08-31\n\nConstant(s) for insurance coverages\n\"\"\"\n\n# Ages\nAGE_0 = 16\nAGE_1 = 25\nAGE_2 = 35\nAGE_3 = 45\nAGE_4 = 55\nAGE_5 = 65\n\n# State Minimum Coverages\nSTATE_MINIMUM_0 = 2593\nSTATE_MINIMUM_1 = 608\nSTATE_MINIMUM_2 = 552\nSTATE_MINIMUM_3 = 525\nSTATE_MINIMUM_4 = 494\nSTATE_MINIMUM_5 = 515\n\n# Liability Coverages\nLIABILITY_COVERAGE_0 = 2957\nLIABILITY_COVERAGE_1 = 691\nLIABILITY_COVERAGE_2 = 627\nLIABILITY_COVERAGE_3 = 596\nLIABILITY_COVERAGE_4 = 560\nLIABILITY_COVERAGE_5 = 585\n\n# Full Coverages\nFULL_COVERAGE_0 = 6930\nFULL_COVERAGE_1 = 1745\nFULL_COVERAGE_2 = 1564\nFULL_COVERAGE_3 = 1469\nFULL_COVERAGE_4 = 1363\nFULL_COVERAGE_5 = 1402\n\n# Accident Rate Increase Percentage\nACCIDENT_RATE_INCREASE = 0.41\n","repo_name":"naertz/NAE_CIS-289_Module-01_Topic-04_Insurance-Quote-Assignment","sub_path":"constants/coverages.py","file_name":"coverages.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7428017535","text":"import numpy as np\nfrom numpy.random import RandomState, SeedSequence, MT19937\nimport matplotlib.pyplot as plt\n\nrs = RandomState(MT19937(SeedSequence(1024)))\n\nT = 1\nN = 2000\ndt = T / N\n\n\ndef path():\n    dW = rs.normal(scale=dt ** 0.5, size=(N, 2))\n    dW = np.vstack((np.zeros(2), dW))\n    Wt = np.cumsum(dW, axis=0)\n    return Wt.T\n\n\n# Uniform event for different parameter pairs\npathA, pathB = path()\n\ncolor_map = ['#eac435', '#345995', '#e40066', '#03cea4']\n\nt = np.linspace(0, T, N + 1)\nfor (alpha, beta), c in zip([(0.5, 0.5), (0.5, 1), (1, 0.5), (1, 1)], color_map):\n    # Different event for different params\n    pathA, pathB = path()\n    plt.plot(t, np.exp(alpha * t + beta * pathA),\n             label=f'$\\\\alpha={alpha},\\\\beta={beta}$', color=c)\n    plt.plot(t, np.exp(alpha * t + beta * pathB),\n             color=c)\n    plt.plot(t, np.exp((alpha + beta ** 2 / 2) * t),\n             color=c)\nplt.ylim(0, 5)\nplt.xlabel('t')\nplt.title(\n    '$f(t,W_t)=\\\\exp(\\\\alpha t+\\\\beta 
W_t),Ef(t,W_t)=\\\\exp((\\\\alpha+\\\\beta^2/2)t)$')\nplt.legend()\nplt.show()\n","repo_name":"OopsYao/homework","sub_path":"SDE/hw12/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42653707332","text":"import numpy as np\nimport cv2\n\n\nimg = cv2.imread('images/bicycle.png')\ncv2.imshow('Bicycle', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\ncropped = img[109:310, 9:160]\ncv2.imshow('Cropped Image', cropped)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"KECB/learn","sub_path":"computer_vision/03_cropping_images.py","file_name":"03_cropping_images.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"18309621597","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-#\n'''\nauthor: -- shidegang --\nCreated Time: 2019-08-28 10:43:17\n'''\n\nimport threading\nfrom time import sleep,ctime\n\ndef music(a):\n    for i in range(2):\n        print('Begin to listen to %s. %s' %(a,ctime()))\n        sleep(2)\n        print('End listen %s' %ctime())\n\ndef video(a):\n    for i in range(2):\n        print('Begin to watch at the %s %s' %(a,ctime()))\n        sleep(3)\n        print('End watch %s' %ctime())\n\nthreads = []\n\nt1 = threading.Thread(target=music,args=('七里香',))\nthreads.append(t1)\n\nt2 = threading.Thread(target=video,args=('阿甘正传',))\nthreads.append(t2)\n\nif __name__ == '__main__':\n\n    t1.setDaemon(True) # with setDaemon(True), the main process exits as soon as it finishes instead of waiting for this thread, and the daemon thread is terminated along with it\n    # setDaemon must be set before start()\n    for thread in threads:\n        thread.start()\n\n    print('############# All over ############## %s' %ctime())","repo_name":"shidg/note","sub_path":"python/study/Process_Threads/守护线程-setDaemon.py","file_name":"守护线程-setDaemon.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"33674124264","text":"\"\"\"\nThis script scrapes data related to character paths, rarities, and elements\nfrom the https://www.prydwen.gg/star-rail/ website using requests and BeautifulSoup. 
It allows users to input URLs manually or automatically,\nand saves the collected data to an Excel file.\n\n\"\"\"\n\nimport web_scrap as ws\nimport pandas as pd\nimport get_urls_auto as get_urls\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef scrape_paths_elements_rarities(url, data):\n    try:\n        # Send an HTTP GET request to the URL\n        response = requests.get(url)\n\n        # Check if the request was successful (status code 200)\n        if response.status_code == 200:\n            # Parse the HTML content with BeautifulSoup\n            soup = BeautifulSoup(response.text, 'html.parser')\n\n            # Find elements based on class names\n            path_elements = soup.find_all(class_=[\"Nihility\", \"Hunt\", \"Abundance\", \"Destruction\", \"Erudition\", \"Harmony\", \"Preservation\"])\n            # use find_all (not find) so every rarity tag can be iterated below\n            rarity_elements = soup.find_all(class_=[\"rarity-5\", \"rarity-4\"])\n            char_elements = soup.find_all(class_=[\"Lightning\", \"Wind\", \"Fire\", \"Ice\", \"Quantum\", \"Imaginary\", \"Physical\"])\n\n            char_name = ws.extract_char_name(url)\n\n            # Append data to the dictionary\n            data['Character'].append(char_name)\n\n            for element in path_elements:\n                data['Path'].append(element.text.replace('Path of ', ''))\n            for element in rarity_elements:\n                if element.text == '5' or element.text == '4':\n                    data['Rarity'].append(element.text)\n            for element in char_elements:\n                if element.text in ['Lightning', 'Wind', 'Physical', 'Fire', 'Ice', 'Quantum', 'Imaginary']:\n                    data['Element'].append(element.text)\n        else:\n            print(\"Failed to retrieve the webpage. Status code:\", response.status_code)\n\n    except Exception as e:\n        print(\"An error occurred:\", str(e))\n\n\ndef main():\n    print('Automatically get urls: press 1')\n    print('Manually get urls: press 2')\n\n    while True:\n        user_input = input('Enter number: ')\n\n        if user_input == '1':\n            user_input_list = get_urls.get_urls_auto()\n            break\n        elif user_input == '2':\n            user_input_list = ws.enter_input()\n            break\n        else:\n            print('Invalid input. 
Please enter 1 or 2.')\n\n data = {\"Character\": [], \"Path\": [], \"Rarity\": [], \"Element\": []}\n\n for url in user_input_list:\n scrape_paths_elements_rarities(url, data)\n\n # Create a DataFrame\n df = pd.DataFrame(data)\n\n output_name = f\"hsr_paths_rarities_elements.xlsx\"\n\n # Save the DataFrame to an Excel file\n df.to_excel(output_name, index=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sakan811/Honkai-Star-Rail-A-Few-Fun-Insights-with-Data-Analysis","sub_path":"codes/scrape_paths_elements_rarities.py","file_name":"scrape_paths_elements_rarities.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28881512672","text":"import json\nfrom copy import deepcopy\nfrom functools import lru_cache\nfrom os import makedirs, path\nfrom typing import Optional, Union\nfrom uuid import uuid4\n\nfrom kanmail.log import logger, setup_logging\n\nfrom .constants import (\n APP_DIR,\n CACHE_DIR,\n DEBUG,\n DEFAULT_WINDOW_HEIGHT,\n DEFAULT_WINDOW_LEFT,\n DEFAULT_WINDOW_TOP,\n DEFAULT_WINDOW_WIDTH,\n DEVICE_ID_FILE,\n ICON_CACHE_DIR,\n LOG_FILE,\n SETTINGS_FILE,\n WINDOW_CACHE_FILE,\n)\nfrom .model import fix_any_old_setings, get_default_settings, validate_settings\n\n# Bootstrap logging before we use logging!\n#\n\nfor needed_dir in (APP_DIR, CACHE_DIR, ICON_CACHE_DIR):\n if not path.exists(needed_dir):\n makedirs(needed_dir)\n\nsetup_logging(debug=DEBUG, log_file=LOG_FILE)\n\nlogger.debug(f\"App dir set to: {APP_DIR}\")\n\n\n# Device ID - totally random unique identifier for this install of Kanmail, this\n# is used to send anonymous debug, error and usage information.\n#\n\n\ndef get_device_id() -> str:\n if path.exists(DEVICE_ID_FILE):\n with open(DEVICE_ID_FILE, \"r\") as f:\n return f.read()\n\n random_device_id = str(uuid4())\n with open(DEVICE_ID_FILE, \"w\") as f:\n f.write(random_device_id)\n\n return random_device_id\n\n\n# \"App\"/user settings\n#\n\n\ndef _merge_settings(\n base_config: dict,\n new_config: dict,\n key_prefix: str = None,\n) -> list:\n changed_keys = []\n\n for key, value in new_config.items():\n # If this key is a dict in the old config, merge those\n if key in base_config and isinstance(value, dict):\n changed_keys.extend(\n _merge_settings(\n base_config[key],\n new_config[key],\n key_prefix=key,\n )\n )\n else:\n if base_config.get(key) != new_config[key]:\n base_config[key] = new_config[key]\n if key_prefix:\n changed_keys.append(f\"{key_prefix}.{key}\")\n else:\n changed_keys.append(key)\n\n return changed_keys\n\n\n@lru_cache(maxsize=1)\ndef get_settings() -> dict:\n settings = get_default_settings()\n\n if path.exists(SETTINGS_FILE):\n with open(SETTINGS_FILE, \"r\") as file:\n data = file.read()\n\n user_settings = json.loads(data)\n has_changed = fix_any_old_setings(user_settings)\n if has_changed:\n set_settings(user_settings)\n\n logger.debug(f\"Loaded settings: {user_settings}\")\n\n # Merge the user settings ontop of the defaults\n _merge_settings(settings, user_settings)\n\n return settings\n\n\ndef get_system_setting(\n key: str,\n default: Optional[dict] = None,\n) -> Union[None, str, int]:\n return get_settings()[\"system\"].get(key, default)\n\n\ndef get_style_setting(\n key: str,\n default: Optional[dict] = None,\n) -> Union[None, str, int]:\n return get_settings()[\"style\"].get(key, default)\n\n\ndef get_settings_copy():\n return deepcopy(get_settings())\n\n\ndef overwrite_settings(settings: dict) -> list:\n # \"Merge\" the 
settings to get the changed key list\n    current_settings = get_settings_copy()\n    changed_keys = _merge_settings(current_settings, settings)\n\n    # Now just save the un-merged original\n    set_settings(settings)\n    return changed_keys\n\n\ndef update_settings(settings_updates: dict) -> list:\n    settings = get_settings_copy()\n    changed_keys = _merge_settings(settings, settings_updates)\n    set_settings(settings)\n    return changed_keys\n\n\ndef set_settings(new_settings: dict) -> None:\n    validate_settings(new_settings)\n\n    logger.debug(f\"Writing new settings: {new_settings}\")\n    json_data = json.dumps(new_settings, indent=4)\n\n    with open(SETTINGS_FILE, \"w\") as file:\n        file.write(json_data)\n\n    get_settings.cache_clear()\n\n\ndef get_window_settings() -> dict:\n    settings = {\n        \"width\": DEFAULT_WINDOW_WIDTH,\n        \"height\": DEFAULT_WINDOW_HEIGHT,\n        \"x\": DEFAULT_WINDOW_LEFT,\n        \"y\": DEFAULT_WINDOW_TOP,\n    }\n\n    if path.exists(WINDOW_CACHE_FILE):\n        with open(WINDOW_CACHE_FILE, \"r\") as f:\n            data = json.load(f)\n\n        logger.debug(f\"Loaded window settings: {data}\")\n\n        for key, value in data.items():\n            if key.startswith(\"WINDOW_\"): # COMPAT w/old style\n                key = key.split(\"_\")[1].lower()\n                logger.warning(f\"Updated old window setting: WINDOW_{key} -> {key}\")\n\n            if key in settings:\n                settings[key] = value\n\n    return settings\n\n\ndef set_window_settings(width: int, height: int, left: int, top: int) -> None:\n    window_settings = {\n        \"width\": width,\n        \"height\": height,\n        \"x\": left,\n        \"y\": top,\n    }\n\n    logger.debug(f\"Writing window settings: {window_settings}\")\n\n    with open(WINDOW_CACHE_FILE, \"w\") as f:\n        f.write(json.dumps(window_settings))\n","repo_name":"Oxygem/Kanmail","sub_path":"kanmail/settings/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","stars":1242,"dataset":"github-code","pt":"81"} +{"seq_id":"23301083545","text":"# python: create folders/files from the JSON in category.py; may need a few extra clicks\n\nfrom category import category\nimport os\nimport re\n\ndef create_file(path_name, file_name):\n    # mode 'a' appends, mode 'w' overwrites\n    with open(path_name + file_name + \".md\", \"w\") as f:\n        f.write(\"\")\n\n# generate file paths and files from the category tree\ndef create_dir(array, n=0, parent_path=\"../docs/\"):\n    if type(array) != list:\n        print(\"Argument 'array' is not a list; stopping the loop\")\n        return\n    n += 1\n    for obj in array:\n        # print(str(n), obj, \"\\n\")\n        if type(obj) == dict:\n            key_name = ''.join(obj.keys())\n            if not os.path.exists(parent_path + key_name):\n                os.mkdir(parent_path + key_name)\n            else:\n                values = obj.values()\n                [item_list] = values or [[]] # unpack the first-level list\n                # write the file\n                if len(item_list) == 1:\n                    [file_name] = item_list\n                    create_file(parent_path + key_name + \"/\", file_name)\n                else:\n                    create_dir(item_list, n, parent_path + key_name + \"/\")\n        if type(obj) == str:\n            # convert spaces in the string to \"_\"\n            str_name = re.sub(r' ', '_', obj)\n            create_file(parent_path, str_name)\n    print(n, \"files/paths\")\n\ncreate_dir(category, 0, \"../docs/\")\n","repo_name":"veaba/tensorflow-docs","sub_path":"scripts/create_markdown.py","file_name":"create_markdown.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18088420374","text":"'''\nRoam Research Time Machine\n\n- Backs up Roam Research image files.\n- Supports incremental backup.\n\nScript steps:\n1. Get the file download link.\n2. Check whether the file already exists in the local folder.\n3. 
Download the file if it does not already exist.\n'''\nimport os\nimport re\nimport requests\nimport shutil\n\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nroam_export = Path('roam_export')\nfiles = Path('roam_export/files')\n\nmd_files = roam_export.glob('*.md')\nfile_url_pattern = \"(?P<url>https?://firebasestorage.[^\\\\s]+)\"\n\nproxies = {\n    'http': 'http://127.0.0.1:1080',\n    'https': 'http://127.0.0.1:1080',\n}\n\ndef is_exist_file(fn):\n    '''\n    Check whether the file already exists in the roam export folder.\n    '''\n    return len(list(roam_export.glob('**/' + fn))) > 0\n\nprint('------------------------------\\nStart Roam Time Machine task.\\n------------------------------')\n\nfor md in md_files:\n    content = open(md, mode='r', encoding='utf8')\n    lines = content.readlines()\n    for line in lines:\n        # Skip lines without the url prefix\n        if not 'https://firebasestorage.' in line:\n            continue\n\n        url = re.search(file_url_pattern, line).group('url')\n        url = url.replace(')', '') # Remove a stray ')' left over from the markdown link\n        # Get file name from parsed url\n        parsed_url = urlparse(url)\n        file_name = parsed_url.path.split('%2F')[-1]\n        file_save_path = Path.joinpath(files, file_name)\n        print('Check downloaded file from url:\\n{0}'.format(url))\n        if is_exist_file(file_name):\n            # Skip already-downloaded file\n            print('Skip downloaded file: {0}'.format(file_name))\n            continue\n\n        print('Start download: {0}'.format(url))\n        # Fetch the image through the proxy\n        response = requests.get(url, stream=True, proxies=proxies)\n        with open(file_save_path, 'wb') as out_file:\n            shutil.copyfileobj(response.raw, out_file)\n        del response\n        print('Saved to: {0}'.format(file_save_path))\n\n    \nprint('------------------------------\\nAll tasks have been completed!\\n------------------------------')\n","repo_name":"yaqinking/RoamResearchTimeMachine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34122878142","text":"from tweepy import API\r\nfrom tweepy import Cursor\r\nfrom tweepy.streaming import StreamListener\r\nfrom tweepy import OAuthHandler\r\nfrom tweepy import Stream\r\nfrom twitter_credentials import *\r\n\r\n\r\nclass TwitterClient:\r\n    def __init__(self, twitter_user=None):\r\n        self.auth = TwitterAuthenticator().authenticate_twitter_app()\r\n        self.twitter_client = API(self.auth)\r\n        self.twitter_user = twitter_user\r\n\r\n    def get_user_timeline_tweets(self, num_tweets): # If an id is not specified it will get your own timeline\r\n        tweets = []\r\n        for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):\r\n            tweets.append(tweet)\r\n        return tweets\r\n\r\n    def get_friend_list(self, num_friends):\r\n        friend_list = []\r\n        for friend in Cursor(self.twitter_client.friends, id=self.twitter_user).items(num_friends):\r\n            friend_list.append(friend)\r\n        return friend_list\r\n\r\n    def get_home_timeline_tweets(self, num_tweets):\r\n        home_timeline_tweets = []\r\n        for tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):\r\n            home_timeline_tweets.append(tweet)\r\n        return home_timeline_tweets\r\n\r\n    def publish_tweet(self, text):\r\n        self.twitter_client.update_status(text)\r\n\r\n    def tweet_image(self, message, filenames):\r\n        if type(filenames) is not list:\r\n            self.twitter_client.update_with_media(status=message, filename=filenames)\r\n            return\r\n        media_ids = []\r\n        for filename in filenames:\r\n            res = self.twitter_client.media_upload(filename)\r\n            media_ids.append(res.media_id)\r\n        
self.twitter_client.update_status(status=message, media_ids=media_ids)\r\n\r\n\r\nclass TwitterAuthenticator:\r\n    @staticmethod\r\n    def authenticate_twitter_app():\r\n        auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\r\n        auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\r\n        return auth\r\n\r\n\r\nclass TwitterStreamer:\r\n    def __init__(self):\r\n        self.twitter_authenticator = TwitterAuthenticator()\r\n\r\n    def stream_tweets(self, hashtag_list):\r\n        # Twitter authentication and connection\r\n        listener = TwitterListener()\r\n        stream = Stream(self.twitter_authenticator.authenticate_twitter_app(), listener)\r\n        stream.filter(track=hashtag_list)\r\n\r\n\r\nclass TwitterListener(StreamListener):\r\n\r\n    def __init__(self, fetched_tweets_filename=None):\r\n        self.fetched_tweets_filename = fetched_tweets_filename\r\n        self.i = 0\r\n\r\n    def on_data(self, data): # Overridden method (tweepy passes only the raw data)\r\n        try:\r\n            with open(self.fetched_tweets_filename, \"a\") as file:\r\n                file.write(data)\r\n            self.i = self.i + 1\r\n        except BaseException as e:\r\n            print(\"Error on_data %s\" % str(e))\r\n        return self.i < 5 # True keeps connection going , False kills connection\r\n\r\n    def on_error(self, status):\r\n        if status == 420: # Returns 420 in case rate limit occurs\r\n            return False\r\n        print(status)\r\n\r\n","repo_name":"youssk541/fnstatz","sub_path":"docs/tweepy_streamer.py","file_name":"tweepy_streamer.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8594727031","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct  2 15:21:37 2017\n\n@author: alxgr\n\"\"\"\nimport numpy as np\nfrom AlexRobotics.dynamic import Manipulator\nfrom AlexRobotics.control import ComputedTorque as CTC\n\ndef ctl( x , t ):\n    \n    u = np.array([0,0])\n    \n    ################################\n    # Your controller here\n\n    R = Manipulator.TwoLinkManipulator()\n    \n    # Define controller\n    ctc = CTC.ComputedTorqueController( R )\n    ctc.w0 = 1\n    \n    u = ctc.ctl( x , t )\n    \n    \n    #################################\n    return u","repo_name":"ali493/pyro","sub_path":"old/tests/auto-testing/students/bob.py","file_name":"bob.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37713027458","text":"# Group anagrams\nfrom typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n    # def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n    #     dic = {}\n    #     for s in strs:\n    #         tmp = ''.join(sorted(s))\n    #         if tmp in dic:\n    #             dic[tmp].append(s)\n    #         else:\n    #             dic[tmp] = [s]\n    #\n    #     # return [_ for _ in dic.values()]\n    #     return list(dic.values())\n\n    # def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n    #     dic = defaultdict(list)\n    #     for s in strs:\n    #         dic[tuple(sorted(s))].append(s)\n    #     return list(dic.values())\n\n    # def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n    #     dic = defaultdict(list)\n    #     for word in strs:\n    #         dic[tuple(sorted(word))].append(word)\n    #     # print(isinstance(iter(dic.values()), collections.Iterator))  # with iter() it becomes an iterator; without it, it is just an iterable\n    #     return list(dic.values())\n\n    # def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n    #     dic = defaultdict(list)\n    #     for arr in strs:\n    #         dic[tuple(sorted(arr))].append(arr)\n    #     return list(dic.values())\n\n    # def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n    #     record = defaultdict(list)\n    #     for c in strs:\n    #         record[tuple(sorted(c))].append(c)\n    #     return 
list(record.values())\n\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n dic = defaultdict(list)\n for attr in strs:\n dic[tuple(sorted(attr))].append(attr)\n return list(dic.values())\n\ns = Solution()\na = [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]\nprint(s.groupAnagrams(a))\n\nstrs = [\"\"]\nprint(s.groupAnagrams(strs))\n\nstrs = [\"a\"]\nprint(s.groupAnagrams(strs))\n","repo_name":"BruceHi/leetcode","sub_path":"month6/groupAnagrams.py","file_name":"groupAnagrams.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41233478793","text":"import logging\n\n# @manual=third-party//scipy:scipy-py\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom reagent.core.tracker import observable\nfrom reagent.evaluation.evaluation_data_page import EvaluationDataPage\nfrom reagent.models.seq2slate import Seq2SlateMode\nfrom reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer\nfrom reagent.types import PreprocessedTrainingBatch\n\n\nlogger = logging.getLogger(__name__)\n\n\n@observable(\n eval_baseline_loss=torch.Tensor,\n eval_advantages=torch.Tensor,\n logged_slate_rank_probs=torch.Tensor,\n ranked_slate_rank_probs=torch.Tensor,\n eval_data_pages_g=EvaluationDataPage,\n eval_data_pages_ng=EvaluationDataPage,\n)\nclass RankingPolicyGradientEvaluator:\n \"\"\" Evaluate ranking models that are learned through policy gradient \"\"\"\n\n def __init__(\n self,\n trainer: Seq2SlateTrainer,\n calc_cpe: bool,\n reward_network: Optional[nn.Module] = None,\n ) -> None:\n assert not calc_cpe or reward_network is not None\n self.trainer = trainer\n self.calc_cpe = calc_cpe\n self.reward_network = reward_network\n\n # Evaluate greedy/non-greedy version of the ranking model\n self.eval_data_pages_g: Optional[EvaluationDataPage] = None\n self.eval_data_pages_ng: Optional[EvaluationDataPage] = None\n\n # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because\n # its type `no_grad` is not callable.\n @torch.no_grad()\n def evaluate(self, eval_tdp: PreprocessedTrainingBatch) -> None:\n seq2slate_net = self.trainer.seq2slate_net\n seq2slate_net_prev_mode = seq2slate_net.training\n seq2slate_net.eval()\n\n logged_slate_rank_prob = torch.exp(\n seq2slate_net(\n eval_tdp.training_input, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE\n )\n .log_probs.detach()\n .flatten()\n .cpu()\n )\n\n eval_baseline_loss = torch.tensor([0.0]).reshape(1)\n if self.trainer.baseline_net:\n baseline_net = self.trainer.baseline_net\n # pyre-fixme[16]: `Optional` has no attribute `training`.\n baseline_net_prev_mode = baseline_net.training\n # pyre-fixme[16]: `Optional` has no attribute `eval`.\n baseline_net.eval()\n # pyre-fixme[29]: `Optional[reagent.models.seq2slate.BaselineNet]` is\n # not a function.\n b = baseline_net(eval_tdp.training_input).detach()\n eval_baseline_loss = (\n F.mse_loss(b, eval_tdp.training_input.slate_reward).cpu().reshape(1)\n )\n # pyre-fixme[16]: `Optional` has no attribute `train`.\n baseline_net.train(baseline_net_prev_mode)\n else:\n b = torch.zeros_like(eval_tdp.training_input.slate_reward)\n\n eval_advantage = (\n # pyre-fixme[6]: `-` is not supported for operand types\n # `Optional[torch.Tensor]` and `Any`.\n (eval_tdp.training_input.slate_reward - b)\n .flatten()\n .cpu()\n )\n\n ranked_slate_output = seq2slate_net(\n eval_tdp.training_input, Seq2SlateMode.RANK_MODE, greedy=True\n )\n 
ranked_slate_rank_prob = torch.prod(\n torch.gather(\n ranked_slate_output.ranked_tgt_out_probs,\n 2,\n ranked_slate_output.ranked_tgt_out_idx.unsqueeze(-1),\n ).squeeze(),\n -1,\n ).cpu()\n\n seq2slate_net.train(seq2slate_net_prev_mode)\n\n if not self.calc_cpe:\n return\n\n edp_g = EvaluationDataPage.create_from_tensors_seq2slate(\n seq2slate_net,\n self.reward_network,\n eval_tdp.training_input,\n eval_greedy=True,\n )\n if self.eval_data_pages_g is None:\n self.eval_data_pages_g = edp_g\n else:\n # pyre-fixme[16]: `Optional` has no attribute `append`.\n self.eval_data_pages_g = self.eval_data_pages_g.append(edp_g)\n\n edp_ng = EvaluationDataPage.create_from_tensors_seq2slate(\n seq2slate_net,\n self.reward_network,\n eval_tdp.training_input,\n eval_greedy=False,\n )\n if self.eval_data_pages_ng is None:\n self.eval_data_pages_ng = edp_ng\n else:\n self.eval_data_pages_ng = self.eval_data_pages_ng.append(edp_ng)\n\n # pyre-fixme[16]: `RankingPolicyGradientEvaluator` has no attribute\n # `notify_observers`.\n self.notify_observers(\n eval_baseline_loss=eval_baseline_loss,\n eval_advantages=eval_advantage,\n logged_slate_rank_probs=logged_slate_rank_prob,\n ranked_slate_rank_probs=ranked_slate_rank_prob,\n )\n\n @torch.no_grad()\n def evaluate_post_training(self):\n self.notify_observers(\n # Use ValueListObserver as aggregating_observers requires input to be Tensor\n eval_data_pages_g=self.eval_data_pages_g,\n eval_data_pages_ng=self.eval_data_pages_ng,\n )\n self.eval_data_pages_g = None\n self.eval_data_pages_ng = None\n","repo_name":"UofT-EcoSystem/rlscope_ReAgent","sub_path":"reagent/evaluation/ranking_policy_gradient_evaluator.py","file_name":"ranking_policy_gradient_evaluator.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31224760996","text":"from datetime import datetime\nfrom pathlib import Path\n\n\nimport rayleaf\nfrom rayleaf.entities import Server, Client\n\n\ndef fedavg(\n dataset: str,\n output_dir: str,\n num_rounds: int = 100,\n eval_every: int = 10,\n num_clients: int = 200,\n clients_per_round: int = 40,\n client_lr: float = 0.05,\n batch_size: int = 64,\n seed: int = 0,\n num_epochs: int = 10,\n gpus_per_client_cluster: float = 1,\n num_client_clusters: int = 8,\n save_model: bool = False,\n notes: str = None\n):\n curr_time = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n\n class SmallClient(Client):\n def init(self):\n self.delete_model_on_completion = True\n\n if dataset == \"femnist\":\n model = \"cnn\"\n elif dataset == \"speech_commands\":\n model = \"m5\"\n elif dataset == \"shakespeare\":\n model = \"stacked_lstm\"\n\n rayleaf.run_experiment(\n dataset = dataset,\n dataset_dir = f\"data/{dataset}/\",\n output_dir= Path(output_dir, dataset, \"fedavg\", f\"{num_clients}clients-{clients_per_round}cpr-{client_lr}lr-{num_epochs}epochs-{num_rounds}rounds\"),\n model = model,\n num_rounds = num_rounds,\n eval_every = eval_every,\n ServerType=Server,\n client_types=[(SmallClient, num_clients)],\n clients_per_round = clients_per_round,\n client_lr = client_lr,\n batch_size = batch_size,\n seed = seed,\n use_val_set = False,\n num_epochs = num_epochs,\n gpus_per_client_cluster = gpus_per_client_cluster,\n num_client_clusters = num_client_clusters,\n save_model = save_model,\n notes = notes\n 
)\n","repo_name":"rizhu/rayleaf-all-experiments","sub_path":"fedavg.py","file_name":"fedavg.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12973812998","text":"\nfrom django.utils.translation import gettext_lazy as _\nfrom django.forms import ModelForm, TextInput, NumberInput, Select, FileInput\nfrom auctions.models import Bid, Comments, ListingItem\n\nclass CreateListing(ModelForm):\n class Meta:\n model = ListingItem\n exclude = ['sold', 'lister']\n labels = {\n 'item_name': _('Item Name'),\n 'description': _('Description'),\n 'base_auction_price': _('Starting Bid Price'),\n 'auction_time_limit': _('How long should the auction run for?'),\n 'selling_price': _('Selling Price'),\n 'category': _('Category'),\n 'images': _('Upload Images')\n }\n widgets = {\n 'item_name': TextInput(attrs={'class': 'form-control w-75 p-3'}),\n 'description': TextInput(attrs={'class': 'form-control w-75 p-3'}),\n 'base_auction_price': NumberInput(attrs={'class': 'form-control w-75 p-3'}),\n 'selling_price': NumberInput(attrs={'class': 'form-control w-75 p-3'}),\n 'category': Select(attrs={'class': 'form-select w-75'}),\n \n }\n\nclass AddComment(ModelForm):\n class Meta:\n model = Comments\n exclude= [\"user\", \"item_name\", \"up_votes\"]\n labels = {\n \"comment\": _(\"Comment\")\n }\n widgets = {\n \"comment\": TextInput(attrs={'class': 'form-control w75 p-3'})\n }\n\nclass AddBid(ModelForm):\n class Meta:\n model = Bid\n exclude= [\"bidder\", \"listing_item\"]\n labels = {\n \"bidding_price\": _(\"Place Bid\")\n }\n widgets = {\n \"bidding_price\": NumberInput(attrs={'class': 'form-control w75 p-3'})\n }","repo_name":"Anisulh/eBuy","sub_path":"auctions/forms/auctions/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35557748266","text":"from datetime import datetime\n\nimport pandas as pd\nfrom typing_extensions import TypedDict\n\nfrom labsdk.raptor import data_source, Context, feature, aggregation, AggregationFunction, freshness, model, manifests, \\\n keep_previous, TrainingContext, StreamingConfig\n\n\n# getting started code\n\n@data_source(\n training_data=pd.read_parquet(\n 'https://gist.github.com/AlmogBaku/8be77c2236836177b8e54fa8217411f2/raw/emails.parquet'),\n keys=['id', 'account_id'],\n timestamp='event_at',\n production_config=StreamingConfig(kind='kafka'),\n)\nclass Email(TypedDict('Email', {'from': str})):\n event_at: datetime\n account_id: str\n subject: str\n to: str\n\n\n@feature(keys='account_id', data_source=Email)\n@aggregation(function=AggregationFunction.Count, over='10h', granularity='1h')\ndef emails_10h(this_row: Email, ctx: Context) -> int:\n \"\"\"email over 10 hours\"\"\"\n return 1\n\n\n@feature(keys='account_id', data_source=Email)\n@aggregation(function=AggregationFunction.Avg, over='10h', granularity='1h')\ndef question_marks_10h(this_row: Email, ctx: Context) -> int:\n \"\"\"question marks over 10 hours\"\"\"\n return this_row['subject'].count('?')\n\n\nprint('# Emails')\nprint(f'```\\n{Email.manifest()}\\n```')\nprint('## Feature: `emails_10h`')\nprint(f'```\\n{emails_10h.manifest()}\\n```')\nprint('### Replayed')\nprint(emails_10h.replay().to_markdown())\n\n\n@data_source(\n training_data=pd.read_csv(\n 'https://gist.githubusercontent.com/AlmogBaku/8be77c2236836177b8e54fa8217411f2/raw/deals.csv'),\n keys=['id', 'account_id'],\n 
timestamp='event_at',\n)\nclass Deal(TypedDict):\n id: int\n event_at: pd.Timestamp\n account_id: str\n amount: float\n\n\n@feature(keys='account_id', data_source=Deal)\n@aggregation(\n function=[AggregationFunction.Sum, AggregationFunction.Avg, AggregationFunction.Max, AggregationFunction.Min],\n over='10h',\n granularity='1m'\n)\ndef deals_10h(this_row: Deal, ctx: Context) -> float:\n \"\"\"sum/avg/min/max of deal amount over 10 hours\"\"\"\n return this_row['amount']\n\n\n@feature(keys='account_id', sourceless_markers_df=Deal.raptor_spec.local_df)\n@freshness(max_age='-1', max_stale='-1')\ndef emails_deals(_, ctx: Context) -> float:\n \"\"\"emails/deal[avg] rate over 10 hours\"\"\"\n e, _ = ctx.get_feature('emails_10h+count')\n d, _ = ctx.get_feature('deals_10h+avg')\n if e is None or d is None:\n return None\n return e / d\n\n\n@feature(keys='account_id', data_source=Deal)\n@freshness(max_age='1h', max_stale='2h')\n@keep_previous(versions=1, over='1h')\ndef last_amount(this_row: Deal, ctx: Context) -> float:\n return this_row['amount']\n\n\n@feature(keys='account_id', sourceless_markers_df=Deal.raptor_spec.local_df)\n@freshness(max_age='1h', max_stale='2h')\ndef diff_with_previous_price(this_row: Deal, ctx: Context) -> float:\n lv, ts = ctx.get_feature('last_amount@-1')\n if lv is None:\n return 0\n return this_row['amount'] - lv\n\n\nprint('# Deals')\nprint(f'```\\n{Deal.manifest()}\\n```')\nprint('## Feature: `deals_10h`')\nprint(f'```\\n{deals_10h.manifest()}\\n```')\nprint(f'### Replayed')\nprint(deals_10h.replay().to_markdown())\nprint(f'## Feature: `emails_deals`')\nprint(f'```\\n{emails_deals.manifest()}\\n```')\nprint('### Replayed')\nprint(emails_deals.replay().to_markdown())\nprint(f'## Feature: `last_amount`')\nprint(f'```\\n{last_amount.manifest()}\\n```')\nprint('### Replayed')\nprint(last_amount.replay().to_markdown())\nprint(f'## Feature: `diff_with_previous_price`')\nprint(f'```\\n{diff_with_previous_price.manifest()}\\n```')\nprint('### Replayed')\nprint(diff_with_previous_price.replay().to_markdown())\n\n\n@model(\n keys=['account_id'],\n input_features=[\n 'emails_10h+count', 'deals_10h+sum', emails_deals, diff_with_previous_price\n ],\n input_labels=[last_amount],\n model_framework='xgboost',\n model_server='sagemaker-ack',\n)\n@freshness(max_age='1h', max_stale='100h')\ndef deal_prediction(ctx: TrainingContext) -> float:\n from xgboost import XGBClassifier\n from sklearn.model_selection import train_test_split\n\n df = ctx.features_and_labels()\n X = df[ctx.input_features]\n y = df[ctx.input_labels]\n\n # Split the data into training and testing sets\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n # Initialize an XGBoost model\n xgb_model = XGBClassifier()\n\n # Fit the model to the training data\n from sklearn.preprocessing import LabelEncoder\n le = LabelEncoder()\n y_train = le.fit_transform(y_train)\n xgb_model.fit(X_train, y_train)\n\n # Evaluate the model on the testing data\n accuracy = xgb_model.score(X_test, y_test)\n\n # Make sure the model has a minimum accuracy of 0.7\n if accuracy < 0.7:\n raise Exception('Accuracy is below 0.7')\n\n return xgb_model\n\n\nprint('# Model')\nm = deal_prediction.train()\ndf = deal_prediction.features_and_labels(since=pd.to_datetime('2020-1-1'), until=pd.to_datetime('2022-12-31'))\nprint(df.to_markdown())\n\n\n# counters\n@feature(keys='account_id', data_source=Deal)\n@aggregation(function=AggregationFunction.Count, over='9999984h', granularity='9999984h')\ndef views(this_row: 
Deal, ctx: Context) -> int:\n return 1\n\n\nprint('# Views')\nprint(f'```\\n{views.manifest()}\\n```')\nprint('## Replayed')\nprint(views.replay().to_markdown())\n\n# gong\ncrm_records_df = pd.DataFrame.from_records([\n {'event_at': '2022-01-01 12:00:00+00:00', 'salesman_id': 'ada', 'action': 'deal_assigned', 'opportunity_id': 15},\n {'event_at': '2022-02-01 13:10:00+00:00', 'salesman_id': 'ada', 'action': 'deal_removed', 'opportunity_id': 15},\n {'event_at': '2022-04-01 13:20:00+00:00', 'salesman_id': 'ada', 'action': 'deal_assigned', 'opportunity_id': 15},\n {'event_at': '2022-06-01 14:00:00+00:00', 'salesman_id': 'ada', 'action': 'deal_closed', 'opportunity_id': 25},\n {'event_at': '2022-06-01 14:10:00+00:00', 'salesman_id': 'ada', 'action': 'deal_assigned', 'opportunity_id': 17},\n {'event_at': '2022-07-01 14:20:00+00:00', 'salesman_id': 'ada', 'action': 'deal_removed', 'opportunity_id': 17},\n {'event_at': '2022-08-01 14:30:00+00:00', 'salesman_id': 'ada', 'action': 'deal_assigned', 'opportunity_id': 17},\n {'event_at': '2022-09-01 14:40:00+00:00', 'salesman_id': 'ada', 'action': 'deal_closed', 'opportunity_id': 17},\n {'event_at': '2022-11-01 15:30:00+00:00', 'salesman_id': 'ada', 'action': 'deal_removed', 'opportunity_id': 17},\n {'event_at': '2022-01-01 12:00:00+00:00', 'salesman_id': 'brian', 'action': 'deal_assigned', 'opportunity_id': 132},\n {'event_at': '2022-02-01 12:20:00+00:00', 'salesman_id': 'brian', 'action': 'deal_removed', 'opportunity_id': 132},\n {'event_at': '2022-02-01 13:40:00+00:00', 'salesman_id': 'brian', 'action': 'deal_assigned', 'opportunity_id': 132},\n {'event_at': '2022-04-01 15:00:00+00:00', 'salesman_id': 'brian', 'action': 'deal_closed', 'opportunity_id': 132},\n {'event_at': '2022-05-01 15:10:00+00:00', 'salesman_id': 'brian', 'action': 'deal_removed', 'opportunity_id': 132},\n {'event_at': '2022-06-01 15:20:00+00:00', 'salesman_id': 'brian', 'action': 'deal_assigned', 'opportunity_id': 544},\n {'event_at': '2022-07-01 15:30:00+00:00', 'salesman_id': 'brian', 'action': 'deal_removed', 'opportunity_id': 544},\n {'event_at': '2022-08-01 15:40:00+00:00', 'salesman_id': 'brian', 'action': 'deal_assigned', 'opportunity_id': 544},\n {'event_at': '2022-09-01 15:50:00+00:00', 'salesman_id': 'brian', 'action': 'deal_closed', 'opportunity_id': 544},\n {'event_at': '2022-10-01 16:00:00+00:00', 'salesman_id': 'brian', 'action': 'deal_assigned', 'opportunity_id': 233},\n {'event_at': '2022-11-01 16:10:00+00:00', 'salesman_id': 'brian', 'action': 'deal_closed', 'opportunity_id': 233},\n {'event_at': '2022-12-01 16:20:00+00:00', 'salesman_id': 'brian', 'action': 'deal_assigned', 'opportunity_id': 444},\n {'event_at': '2022-12-01 16:30:00+00:00', 'salesman_id': 'brian', 'action': 'deal_closed', 'opportunity_id': 444},\n])\n\n\n@data_source(training_data=crm_records_df, keys=['salesman_id', 'opportunity_id'])\nclass CrmRecord(TypedDict):\n event_at: datetime\n salesman_id: str\n action: str\n opportunity_id: int\n\n\n@feature(keys='salesman_id', data_source=CrmRecord)\n@aggregation(function=AggregationFunction.DistinctCount, over='8760h', granularity='24h')\ndef unique_deals_involvement_annually(this_row: CrmRecord, ctx: Context) -> int:\n if this_row['action'] == 'deal_assigned':\n return this_row['opportunity_id']\n return None\n\n\nunique_deals_involvement_annually.replay()\n\n\n@feature(keys='salesman_id', data_source=CrmRecord)\n@aggregation(function=AggregationFunction.DistinctCount, over='8760h', granularity='24h')\ndef closed_deals_annually(this_row: 
CrmRecord, ctx: Context) -> int:\n if this_row['action'] == 'deal_closed':\n return 1\n return None\n\n\nclosed_deals_annually.replay()\n\n\n@feature(keys='salesman_id', data_source=CrmRecord)\n@freshness(max_age='24h', max_stale='8760h')\ndef salesperson_deals_closes_rate(this_row: CrmRecord, ctx: Context) -> int:\n udia, _ = ctx.get_feature('unique_deals_involvement_annually+distinct_count')\n cda, _ = ctx.get_feature('closed_deals_annually+count')\n if udia is None or cda is None:\n return None\n return udia / cda\n\n\nsalesperson_deals_closes_rate.replay()\n\n# other tests\n\n\ndf = pd.DataFrame.from_records([\n {'event_at': '2022-01-01 12:00:00+00:00', 'account_id': 'ada', 'subject': 'wrote_code', 'commit_count': 1},\n {'event_at': '2022-01-01 13:10:00+00:00', 'account_id': 'ada', 'subject': 'wrote_code', 'commit_count': 1},\n {'event_at': '2022-01-01 13:20:00+00:00', 'account_id': 'ada', 'subject': 'fixed_bug', 'commit_count': 1},\n {'event_at': '2022-01-01 14:00:00+00:00', 'account_id': 'ada', 'subject': 'deployed', 'commit_count': 3},\n {'event_at': '2022-01-01 14:10:00+00:00', 'account_id': 'ada', 'subject': 'developed', 'commit_count': 1},\n {'event_at': '2022-01-01 14:20:00+00:00', 'account_id': 'ada', 'subject': 'built_model', 'commit_count': 4},\n {'event_at': '2022-01-01 14:30:00+00:00', 'account_id': 'ada', 'subject': 'wrote_code', 'commit_count': 3},\n {'event_at': '2022-01-01 14:40:00+00:00', 'account_id': 'ada', 'subject': 'experimented', 'commit_count': 2},\n {'event_at': '2022-01-01 15:30:00+00:00', 'account_id': 'ada', 'subject': 'wrote_code', 'commit_count': 1},\n {'event_at': '2022-01-01 12:00:00+00:00', 'account_id': 'brian', 'subject': 'developed', 'commit_count': 1},\n {'event_at': '2022-01-01 12:20:00+00:00', 'account_id': 'brian', 'subject': 'wrote_code', 'commit_count': 2},\n {'event_at': '2022-01-01 13:40:00+00:00', 'account_id': 'brian', 'subject': 'experimented', 'commit_count': 1},\n {'event_at': '2022-01-01 15:00:00+00:00', 'account_id': 'brian', 'subject': 'developed', 'commit_count': 1},\n {'event_at': '2022-01-01 15:10:00+00:00', 'account_id': 'brian', 'subject': 'wrote_code', 'commit_count': 4},\n {'event_at': '2022-01-01 15:20:00+00:00', 'account_id': 'brian', 'subject': 'developed', 'commit_count': 5},\n {'event_at': '2022-01-01 15:30:00+00:00', 'account_id': 'brian', 'subject': 'wrote_code', 'commit_count': 1},\n {'event_at': '2022-01-01 15:40:00+00:00', 'account_id': 'brian', 'subject': 'experimented', 'commit_count': 2},\n {'event_at': '2022-01-01 15:50:00+00:00', 'account_id': 'brian', 'subject': 'developed', 'commit_count': 1},\n {'event_at': '2022-01-01 16:00:00+00:00', 'account_id': 'brian', 'subject': 'wrote_code', 'commit_count': 2},\n {'event_at': '2022-01-01 16:10:00+00:00', 'account_id': 'brian', 'subject': 'built_model', 'commit_count': 1},\n {'event_at': '2022-01-01 16:20:00+00:00', 'account_id': 'brian', 'subject': 'built_model', 'commit_count': 1},\n {'event_at': '2022-01-01 16:30:00+00:00', 'account_id': 'brian', 'subject': 'experimented', 'commit_count': 3},\n])\n\n\n@data_source(training_data=df, keys='account_id', timestamp='event_at')\nclass Commit(TypedDict):\n event_at: datetime\n account_id: str\n subject: str\n commit_count: int\n\n\n@feature(keys='account_id', data_source=Commit)\n@freshness(max_age='1m', max_stale='10m')\ndef subject(this_row: Commit, ctx: Context) -> str:\n return this_row['subject']\n\n\nsubject.replay()\n\n\n@feature(keys='account_id', 
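One detail worth flagging in the block above: closed_deals_annually returns the constant 1 but is registered with AggregationFunction.DistinctCount, so its distinct count can never exceed 1 per window, and the consumer salesperson_deals_closes_rate even requests it as 'closed_deals_annually+count'. If the intent is to count closing events, a plain Count registration appears to be what was meant; a corrected sketch, assuming the same decorators and imports already in scope in this file:

@feature(keys='salesman_id', data_source=CrmRecord)
@aggregation(function=AggregationFunction.Count, over='8760h', granularity='24h')
def closed_deals_annually(this_row: CrmRecord, ctx: Context) -> int:
    # Count each closing event once; other actions contribute nothing (None).
    if this_row['action'] == 'deal_closed':
        return 1
    return None

Note also that udia / cda is involvements per close; a close rate in the usual sense would be cda / udia.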
data_source=Commit)\n@aggregation(function=AggregationFunction.DistinctCount, over='2h', granularity='10m')\ndef unique_tasks_over_2h(this_row: Commit, ctx: Context) -> str:\n return this_row['subject']\n\n\nunique_tasks_over_2h.replay()\n\n\n@feature(keys='account_id', data_source=Commit)\n@aggregation(\n function=[AggregationFunction.Sum, AggregationFunction.Count, AggregationFunction.Max],\n over='30m', granularity='1m')\ndef commits_30m(this_row: Commit, ctx: Context) -> int:\n \"\"\"sum/max/count of commits over 30 minutes\"\"\"\n\n return this_row['commit_count']\n\n\ncommits_30m.replay()\n\n\n@feature(keys='account_id', data_source=Commit)\n@freshness(max_age='1m', max_stale='30m')\ndef commits_30m_greater_2(this_row: Commit, ctx: Context) -> bool:\n res, _ = ctx.get_feature('commits_30m+sum')\n return res > 2\n\n\ncommits_30m_greater_2.replay()\n\n\n@model(\n keys=['account_id'],\n input_features=[\n 'commits_30m+sum', commits_30m_greater_2\n ],\n input_labels=[],\n model_framework='sklearn',\n)\n@freshness(max_age='1h', max_stale='100h')\ndef newest():\n # TODO: implement\n pass\n\n\nprint(manifests())\n\nret = newest.features_and_labels(since=pd.to_datetime('2019-12-04 00:00'), until=pd.to_datetime('2023-01-04 00:00'))\nprint(ret)\n","repo_name":"raptor-ml/raptor","sub_path":"labsdk/_test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13491,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"81"} +{"seq_id":"20310066991","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nimport numpy as N\nfrom scipy import ndimage, stats\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas\nfrom matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar\nfrom matplotlib.widgets import Cursor\n\n\nclass PopUpFringes(object):\n\tdef __init__(self, xdata, xlabel, ylabel, title):\n\t\tself.popupwin=Gtk.Window()\n\t\tself.popupwin.set_size_request(600,550)\n\t\tself.popupwin.set_position(Gtk.WindowPosition.CENTER)\n\t\tself.popupwin.set_border_width(10)\n\t\tself.xdata = xdata\n\t\tvbox = Gtk.VBox()\n\t\tself.fig=Figure(dpi=100)\n\t\tself.ax = self.fig.add_subplot(111)\n\t\tself.canvas = FigureCanvas(self.fig)\n\t\tself.main_figure_navBar = NavigationToolbar(self.canvas, self)\n\t\tself.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)\n\t\tself.ax.set_xlabel(xlabel, fontsize = 18)\n\t\tself.ax.set_ylabel(ylabel, fontsize = 18)\n\t\tself.ax.set_title(title, fontsize = 18)\n\t\t\n\t\txi = N.arange(len(self.xdata))\t\t\n\t\tslope, intercept, r_value, p_value, std_err = stats.linregress(self.xdata,xi)\n\t\tfitline = slope*self.xdata+intercept\n\t\t\n\t\tself.ax.plot(self.xdata, fitline, 'r-',self.xdata,xi, 'bo')\n\t\tself.ax.axis([self.xdata.min(),self.xdata.max(),xi.min()-1, xi.max()+1])\n\t\t\n\t\tself.ax.text(0.3, 0.9,'Slope = %.4f +- %.4f' % (slope, std_err),\n\t\t\t\t\t\t\t\thorizontalalignment='center',\n\t\t\t\t\t\t\t\tverticalalignment='center',\n\t\t\t\t\t\t\t\ttransform = self.ax.transAxes,\n\t\t\t\t\t\t\t\tcolor='red')\n\t\tvbox.pack_start(self.main_figure_navBar, False, False, 0)\n\t\tvbox.pack_start(self.canvas, True, True, 2)\n\t\tself.popupwin.add(vbox)\n\t\tself.popupwin.connect(\"destroy\", self.dest)\n\t\tself.popupwin.show_all()\n\t\n\tdef dest(self,widget):\n\t\tself.popupwin.destroy()\n\t\nclass PopUpImage(object):\n\tdef __init__(self, 
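PopUpFringes above fits fringe index against position with scipy.stats.linregress and annotates the slope and its standard error. Here is the core of that fit lifted into a standalone sketch (the positions are synthetic):

import numpy as np
from scipy import stats

xdata = np.array([0.00, 0.11, 0.19, 0.31, 0.42])  # e.g. fringe positions
xi = np.arange(len(xdata))                        # fringe index 0..n-1
slope, intercept, r_value, p_value, std_err = stats.linregress(xdata, xi)
fitline = slope * xdata + intercept               # the red line in the popup
print('Slope = %.4f +- %.4f' % (slope, std_err))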
xdata, ydata, xlabel, ylabel, title):\n\t\tself.popupwin=Gtk.Window()\n\t\tself.popupwin.set_size_request(600,550)\n\t\tself.popupwin.set_position(Gtk.WindowPosition.CENTER)\n\t\tself.popupwin.set_border_width(10)\n\t\tself.xdata = xdata\n\t\tself.ydata = ydata\n\t\tvbox = Gtk.VBox()\n\t\tself.fig=Figure(dpi=100)\n\t\tself.ax = self.fig.add_subplot(111)\n\t\tself.canvas = FigureCanvas(self.fig)\n\t\tself.main_figure_navBar = NavigationToolbar(self.canvas, self)\n\t\tself.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)\n\t\tself.canvas.mpl_connect(\"button_press_event\",self.on_press)\n\t\tself.ax.set_xlabel(xlabel, fontsize = 18)\n\t\tself.ax.set_ylabel(ylabel, fontsize = 18)\n\t\tself.ax.set_title(title, fontsize = 18)\n\t\tself.ax.plot(self.xdata, self.ydata, 'b-', lw=2)\n\t\t\n\t\tself.textes = []\n\t\tself.plots = []\n\t\tvbox.pack_start(self.main_figure_navBar, False, False, 0)\n\t\tvbox.pack_start(self.canvas, True, True, 2)\n\t\tself.popupwin.add(vbox)\n\t\tself.popupwin.connect(\"destroy\", self.dest)\n\t\tself.popupwin.show_all()\n\t\n\tdef dest(self,widget):\n\t\tself.popupwin.destroy()\n\t\n\tdef on_press(self, event):\n\t\tif event.inaxes == self.ax and event.button==3:\n\t\t\tself.clear_notes()\n\t\t\txc = event.xdata\n\t\t\t#***** Find the closest x value *****\n\t\t\tresiduel = self.xdata - xc\n\t\t\tresiduel = N.abs(residuel)\n\t\t\tj = N.argmin(residuel)\n\t\t\t#y = self.ydata[i-1:i+1]\n\t\t\t#yc= y.max()\n\t\t\t#j = N.where(self.ydata == yc)\n\t\t\t#j = j[0][0]\n\t\t\txc= self.xdata[j]\n\t\t\tx_fit = self.xdata[j-3:j+3]\n\t\t\ty_fit = self.ydata[j-3:j+3]\n\t\t\tfitted_param, fitted_data = fit(x_fit, y_fit, xc, True)\n\t\t\tx_fit = N.linspace(x_fit.min(), x_fit.max(), 200)\n\t\t\ty_fit = psdVoigt(fitted_param, x_fit)\n\t\t\tperiod = fitted_param['xc'].value\n\t\t\tstd_err= fitted_param['xc'].stderr\n\t\t\t\n\t\t\tp = self.ax.plot(x_fit, y_fit,'r-')\n\t\t\tp2 = self.ax.axvline(period,color='green',lw=2)\n\t\t\t\n\t\t\ttxt=self.ax.text(0.05, 0.9, 'Period = %.4f +- %.4f (nm)'%(period, std_err), transform = self.ax.transAxes, color='red')\n\t\t\tself.textes.append(txt)\n\t\t\tself.plots.append(p[0])\n\t\t\tself.plots.append(p2)\n\t\telif event.inaxes == self.ax and event.button==2:\n\t\t\tdif = N.diff(self.ydata)\n\t\t\tdif = dif/dif.max()\n\t\t\tp3 = self.ax.plot(dif,'r-')\n\t\t\tself.plots.append(p3[0])\n\t\tself.canvas.draw()\n\t\n\tdef clear_notes(self):\n\t\tif len(self.textes)>0:\n\t\t\tfor t in self.textes:\n\t\t\t\tt.remove()\n\t\tif len(self.plots)>0:\n\t\t\tfor p in self.plots:\n\t\t\t\tp.remove()\n\t\tself.textes = []\n\t\tself.plots = []\n","repo_name":"Traecp/DEVA","sub_path":"build/lib/DEVA/PopUpWindows.py","file_name":"PopUpWindows.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71138338825","text":"from preprocessing.feature_extraction import *\nfrom preprocessing.language_processing import *\nfrom llm.sentiment import *\nfrom data.data import *\nfrom copy import copy\nimport os\nfrom ensemble.gpt_class import label_row\nimport time\nimport ensemble.weak as weak\n\n\nclass Pipe:\n def __init__(self, dataset, shrink=False, size=1000, cut=False, cut_size=1000):\n self.stop_words = set_up_stop_words()\n self.name = dataset\n self.raw = load(dataset)\n if cut:\n self.cut(size=cut_size)\n if shrink:\n self.shrink(size=size)\n self.text = None\n self.processed = None\n self.extractor = None\n self.labeled = None\n\n def __reduce__(self):\n # 
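In on_press above, the click is snapped to the nearest sample with an argmin over absolute residuals before fitting a small window around it; note that fit and psdVoigt are called but never imported in this module, so a right-click would raise NameError unless they are injected elsewhere in the package. The snapping idiom on its own, with synthetic data:

import numpy as np

xdata = np.linspace(0.0, 10.0, 101)
xc = 3.14                          # clicked x coordinate
j = np.argmin(np.abs(xdata - xc))  # index of the closest sample
x_fit = xdata[j - 3:j + 3]         # local window handed to the peak fit
print(j, xdata[j], x_fit)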
Return a tuple with the callable, arguments, and the state (all attributes needed for recreation)\n return (\n self.__class__,\n (self.name,),\n {\n \"text\": self.text,\n \"processed\": self.processed,\n \"extractor\": self.extractor,\n \"stop_words\": self.stop_words,\n },\n )\n\n def shrink(self, size):\n self.raw = self.raw.sample(n=size, random_state=1)\n cyan(\"shrank dataset to %s\" % len(self.raw))\n\n def cut(self, size):\n self.raw = self.raw[:size]\n cyan(\"cut dataset to %s\" % len(self.raw))\n\n def process_texts(self):\n self.processed = copy(self.raw)\n self.processed = self.processed.assign(processed=self.processed[\"sentence\"])\n self.processed[\"processed\"] = self.processed[\"processed\"].apply(\n process_text, args=(self.stop_words,)\n )\n self.text = list(self.processed[\"processed\"])\n for i, t in enumerate(self.text):\n self.text[i] = \" \".join(t)\n # print(\"self.text: length: %s type: %s\" % (len(self.text), type(self.text)))\n\n def label_texts(self, test=False, batch=True, start=0, stop=10):\n cyan(\"Starting to label text. This will take a while.\")\n self.labeled = copy(self.raw)\n if test:\n self.labeled = self.labeled.head(3)\n if batch:\n self.labeled = self.labeled[start:stop]\n self.labeled[\"output\"] = self.labeled.apply(\n lambda row: label_row(row[\"sentence\"]), axis=1\n )\n self.labeled[\n [\"sentiment_score\", \"confidence_rating\", \"explanation_score\", \"explanation\"]\n ] = pd.DataFrame(self.labeled[\"output\"].tolist(), index=self.labeled.index)\n self.labeled.drop(columns=[\"output\"], inplace=True)\n # cyan(self.labeled)\n self.labeled.to_csv(\"test_label.csv\", index=False, sep=\"|\")\n file_name = os.path.basename(self.name)\n file_name = file_name.replace(\".csv\", \"\")\n save_dir = f\"labeled_data/{file_name}\" # Specify the directory path where you want to save the data\n os.makedirs(save_dir, exist_ok=True) # Create the directory if it doesn't exist\n if batch:\n labeled_csv_path = os.path.join(\n save_dir, f\"batch_{start}_{stop}_labeled.csv\"\n )\n else:\n labeled_csv_path = os.path.join(save_dir, f\"full_labeled.csv\")\n self.labeled.to_csv(labeled_csv_path, index=False, sep=\"|\")\n return start, stop\n\n def label_by_batch(self, batch_size=10, batch_start=0):\n final = len(self.raw)\n cyan(\"starting to process %s records in by batches of %s\" % (final, batch_size))\n batch_end = batch_start + batch_size\n while batch_end < final:\n start_time = time.time()\n batch_start, batch_end = self.label_texts(\n batch=True, start=batch_start, stop=batch_end\n )\n end_time = time.time()\n run_time = end_time - start_time\n green(\n \"finished processing batch %s - %s in %s seconds\"\n % (batch_start, batch_end, run_time)\n )\n batch_start = batch_end\n batch_end += batch_size\n if batch_start < final:\n start_time = time.time()\n batch_start, batch_end = self.label_texts(\n batch=True, start=batch_start, stop=final\n )\n end_time = time.time()\n run_time = end_time - start_time\n green(\n \"finished processing final batch %s - %s in %s seconds\"\n % (batch_start, final, run_time)\n )\n\n def extract_features(self):\n self.extractor = MultiExtractor(self.text)\n self.extractor.fit()\n self.extractor.process()\n self.processed[\"vector\"] = self.extractor.vector_list\n\n def run_pipe(self):\n self.process_texts()\n self.extract_features()\n\n def create_weak_classifiers(self):\n svm_classifier = weak.SVMClassifier(self.processed)\n naive_bayes_classifier = weak.NaiveBayesClassifier(self.processed)\n logistic_regression_classifier = 
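The __reduce__ implemented above controls pickling: pickle calls the returned class with (self.name,) and then, absent a __setstate__, merges the state dict into the new instance's __dict__. A self-contained illustration of that protocol with a throwaway class (Point is hypothetical, not part of this project):

import pickle

class Point:
    def __init__(self, name):
        self.name = name
        self.extra = None

    def __reduce__(self):
        # (callable, ctor args, state); the state dict is merged into
        # the rebuilt instance's __dict__ on load.
        return (self.__class__, (self.name,), {'extra': self.extra})

p = Point('origin')
p.extra = 42
q = pickle.loads(pickle.dumps(p))
print(q.name, q.extra)  # origin 42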
weak.LogisticRegressionClassifier(\n self.processed\n )\n random_forest_classifier = weak.RandomForestClassifierWrapper(self.processed)\n\n return (\n svm_classifier,\n naive_bayes_classifier,\n logistic_regression_classifier,\n random_forest_classifier,\n )\n\n def save(self):\n # Extract the filename after 'data/' and remove the '.csv' extension\n file_name = os.path.basename(self.name)\n file_name = file_name.replace(\".csv\", \"\")\n save_dir = \"labeled_data/\" # Specify the directory path where you want to save the data\n os.makedirs(save_dir, exist_ok=True) # Create the directory if it doesn't exist\n\n # Save raw data as CSV\n cyan(\"starting to save...\")\n raw_csv_path = os.path.join(save_dir, f\"{file_name}_raw.csv\")\n self.raw.to_csv(raw_csv_path, index=False, sep=\"|\")\n\n # Save text data as CSV\n text_csv_path = os.path.join(save_dir, f\"{file_name}_text.csv\")\n pd.DataFrame({\"text\": self.text}).to_csv(text_csv_path, index=False, sep=\"|\")\n\n # Save processed data as CSV\n processed_csv_path = os.path.join(save_dir, f\"{file_name}_processed.csv\")\n self.processed.to_csv(processed_csv_path, index=False, sep=\"|\")\n\n print(\"Data successfully saved.\")\n\n\ndef load_pipe(file_name):\n save_dir = \"pickle/\" # Specify the directory path where you saved the data\n raw_csv_path = os.path.join(save_dir, f\"{file_name}_raw.csv\")\n text_csv_path = os.path.join(save_dir, f\"{file_name}_text.csv\")\n processed_csv_path = os.path.join(save_dir, f\"{file_name}_processed.csv\")\n\n raw_data = pd.read_csv(raw_csv_path)\n text_data = pd.read_csv(text_csv_path)[\"text\"].tolist()\n processed_data = pd.read_csv(processed_csv_path)\n\n pipe = Pipe(None) # Create an empty Pipe object\n pipe.name = file_name\n pipe.raw = raw_data\n pipe.text = text_data\n pipe.processed = processed_data\n\n # Refit the extractor\n pipe.extractor = MultiExtractor(pipe.text)\n pipe.extractor.fit()\n pipe.extractor.vector_list = pipe.processed[\"vector\"]\n\n return pipe\n\n\ndef classify_with_weak(file_name, name):\n pipe = Pipe(file_name, cut=True, cut_size=1000)\n pipe.process_texts()\n pipe.extract_features()\n # pipe2 = copy(pipe)\n cyan(\"starting without llm\")\n svm1, naive1, log1, rf1 = pipe.create_weak_classifiers()\n classifiers1 = [svm1, naive1, log1, rf1]\n weak_results = []\n weak_columns = []\n for classifier in classifiers1:\n yellow(\"calculating %s without llm\" % classifier.column)\n y_pred, accuracy, precision, recall, column = classifier.fit_and_evaluate()\n weak_results.append(y_pred)\n weak_columns.append(column)\n pipe.processed = add_weak_results(pipe.processed, weak_results, weak_columns)\n pipe.processed.to_csv(f\"results/with_weak/{name}.csv\", index=False, sep=\"|\")\n cyan(\"starting with llm\")\n \"\"\"\n weak_columns = []\n weak_results = []\n svm2, naive2, log2, rf2 = pipe2.create_weak_classifiers()\n classifiers2 = [svm2, naive2, log2, rf2]\n for classifier in classifiers2:\n cyan(\"calculating %s with llm\" % classifier.column)\n y_pred, accuracy, precision, recall, column = classifier.fit_and_evaluate(\n include_llm=True\n )\n y_pred = np.array(y_pred)\n weak_results.append(y_pred)\n weak_columns.append(column)\n pipe2.processed = add_weak_results(pipe2.processed, weak_results, weak_columns)\n pipe2.processed.to_csv(\n f\"results/with_weak/imdb_results_with_llm.csv\", index=False, sep=\"|\"\n )\n \"\"\"\n\n\ndef add_weak_results(df, results, columns):\n for result, column_name in zip(results, columns):\n df[column_name] = result\n return df\n\n\ndef 
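classify_with_weak above stores each weak learner's predictions as its own column via add_weak_results; a natural downstream step, not implemented in this file (the frame and labels below are invented), is a per-row majority vote across those columns:

import pandas as pd

preds = pd.DataFrame({
    'svm': [1, 0, 1],
    'naive_bayes': [1, 1, 0],
    'logistic_regression': [1, 0, 1],
    'random_forest': [1, 0, 1],
})
# mode(axis=1) returns the most frequent label in each row.
majority = preds.mode(axis=1)[0].astype(int)
print(majority.tolist())  # [1, 0, 1]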
classify_all_with_weak():\n # classify_with_weak(\"results\\\\depth_6_imdb_with_results.csv\", \"imdb\")\n # classify_with_weak(\"results\\\\depth_6_yelp_with_results.csv\", \"yelp\")\n # classify_with_weak(\"results\\\\depth_6_amazon_with_results.csv\", \"amazon\")\n # classify_with_weak(\"results\\\\depth_6_gold_with_results.csv\", \"gold\")\n classify_with_weak(\"results\\\\depth_7_movies_1000_with_results.csv\", \"movies\")\n\n\n","repo_name":"ginkorea/eesa","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":9145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30009703961","text":"import xcalar.container.driver.base as driver\nimport xcalar.container.context as ctx\nimport logging\nimport os\nimport json\n\nlogger = logging.getLogger(\"xcalar\")\n\n\n@driver.register_export_driver(name=\"darrin_json_driver\")\n@driver.param(\n name=\"dir_path\",\n type=driver.STRING,\n desc=\"export directory into which to drop our json files\")\n@driver.param(\n name=\"file_base\",\n type=driver.STRING,\n desc=\"base file name to be used in formatting the output files\")\n# @driver.param(name=\"driver param 4\", type=driver.TARGET,\n# desc=\"test driver param4\",\n# optional=True, secret=True)\ndef driver(table, dir_path, file_base):\n xpu_id = ctx.get_xpu_id()\n\n file_name = \"{}-{}.json\".format(file_base, xpu_id)\n path = os.path.join(dir_path, file_name)\n\n # Let's create our directories first\n os.makedirs(dir_path, exist_ok=True)\n\n rows = list(table.partitioned_rows())\n # Now we create the file itself\n with open(path, \"w\") as f:\n json.dump(rows, f)\n","repo_name":"varlogtim/xcalar","sub_path":"src/bin/sdk/xpu/xcalar/container/driver/builtins/darrin_json_driver.py","file_name":"darrin_json_driver.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22039056426","text":"from . import signal\n\n\nCONVERSION_START = signal.Signal(args=['document'], name='inbound.CONVERSION_START')\n'''\n.. danger::\n .. deprecated:: 0.5.4\n No replacement.\n\nEmitted when the inbound conversion will start (i.e., this signal is emitted to cause a converter\nmodule to start the conversion).\n\n:kwarg object document: The inbound musical document. The required type is determined by each\n converter module individually.\n'''\n\nCONVERSION_STARTED = signal.Signal(name='inbound.CONVERSION_STARTED')\n'''\n.. danger::\n .. deprecated:: 0.5.4\n No replacement.\n\nEmitted as soon as the inbound conversion has started (i.e., as soon as the converter module has\nbegun to process data).\n'''\n\nCONVERSION_FINISH = signal.Signal(args=['converted'], name='inbound.CONVERSION_FINISH')\n'''\n.. danger::\n .. deprecated:: 0.5.4\n No replacement.\n\nEmitted just before the inbound conversion finishes (i.e., emitting this signal is the last action\nof an inbound conversion module).\n\n:kwarg converted: The inbound musical document, converted to Lychee-MEI format.\n:type converted: :class:`xml.etree.ElementTree.Element` or :class:`xml.etree.ElementTree.ElementTree`\n'''\n\nCONVERSION_FINISHED = signal.Signal(name='inbound.CONVERSION_FINISHED')\n'''\n.. warning::\n .. deprecated:: 0.5.4\n May be retained.\n\nEmitted when the inbound conversion is finished, before any \"views\" information is processed.\n'''\n\nCONVERSION_ERROR = signal.Signal(args=['msg'], name='inbound.CONVERSION_ERROR')\n'''\n.. danger::\n .. 
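The Signal objects defined in this module follow a connect-then-emit workflow, as the VIEWS_START documentation below spells out. For readers unfamiliar with the pattern, a minimal sketch using the blinker library (blinker is an assumption for illustration; lychee ships its own Signal wrapper with declared args):

from blinker import signal

conversion_finished = signal('inbound.CONVERSION_FINISHED')

def on_finished(sender, **kwargs):
    print('inbound conversion done; views processing may begin')

conversion_finished.connect(on_finished)  # subscribe first...
conversion_finished.send(None)            # ...then emit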
deprecated:: 0.5.4\n Will become part of CONVERSION_FINISHED, if it is retained.\n\nEmitted when there's an error during the in bound conversion step.\n\n:kwarg str msg: A descriptive error message for the log file.\n'''\n\nVIEWS_START = signal.Signal(\n args=['converted', 'document', 'session', 'views_info'],\n name='inbound.VIEWS_START'\n)\n'''\n.. danger::\n .. deprecated:: 0.5.4\n No replacement.\n\nEmit this signal to start the inbound views processing step.\n\n:param converted: The incoming (partial) document, already converted.\n:type converted: :class:`lxml.etree.Element` or :class:`lxml.etree.ElementTree`\n:param document: The incoming (partial) document for conversion, as supplied to\n :func:`do_inbound_conversion`.\n:type document: As required by the converter.\n:param session: A session instance for the ongoing notation session.\n:type session: :class:`lychee.workflow.session.InteractiveSession`\n:param str views_info: The ``views_info`` argument from the :const:`~lychee.signals.ACTION_START`\n signal. This is interpreted as the Lychee-MEI @xml:id that should be used for ``converted``. If\n omitted, assume ``converted`` is a new ``
`` in this document.\n\nBy default, this signal is not connected to a views-processing module so you must connect it to the\nproper function before you emit this signal. This is provided as a signal so that additional modules\ncan be notified of the workflow progress.\n\nFor information on writing a views processing module, refer to the :mod:`lychee.views` module\ndocumentation.\n'''\n\nVIEWS_STARTED = signal.Signal(name='inbound.VIEWS_STARTED')\n'''\n.. danger::\n .. deprecated:: 0.5.4\n No replacement.\n\nEmitted by the inbound views processing module as soon as it gains control, thereby confirming that\nan inbound views processor was correctly chosen.\n'''\n\nVIEWS_FINISH = signal.Signal(args=['views_info'], name='inbound.VIEWS_FINISH')\n'''\n.. danger::\n .. deprecated:: 0.5.4\n No replacement.\n\nEmitted by the inbound views processing module to return views information to its caller.\n'''\n\nVIEWS_FINISHED = signal.Signal(name='inbound.VIEWS_FINISHED')\n'''\n.. warning::\n .. deprecated:: 0.5.4\n May be retained.\n\nEmitted after inbound views processing by the module running the workflow.\n'''\n\nVIEWS_ERROR = signal.Signal(args=['msg'], name='inbound.VIEWS_ERROR')\n'''\n.. danger::\n .. deprecated:: 0.5.4\n Will become part of VIEWS_FINISHED, if it is retained.\n\nEmitted by the inbound views processing module, or the module running the workflow, to indicate that\nan error has occurred while generating views information. The error may be recoverable, or may cause\nthe entire views step to fail, but *Lychee* may be able to continue the workflow.\n\n:kwarg str msg: A descriptive error message for the log file.\n'''\n","repo_name":"nCoda/lychee","sub_path":"lychee/signals/inbound.py","file_name":"inbound.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"14384822822","text":"import time\nimport keyboard\nimport win32api, win32con\nimport mss\n\nsct = mss.mss()\n\n# Coordinates of window\nx1 = 660\ny1 = 370\nx2 = 600\ny2 = 420\n\nwindow = {\"top\": y1, \"left\": x1, \"width\": x2, \"height\": y2}\n\ntime.sleep(2)\n\n# Click on the given x,y\ndef click(x,y):\n win32api.SetCursorPos((x,y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)\n\n# Press \"q\" to stop the script\nwhile keyboard.is_pressed('q') == False:\n # Take screenshot\n image = sct.grab(window)\n\n for y in range(0, int(y2), 10):\n for x in range(0, int(x2), 10):\n\n r, g, b = image.pixel(x, y)\n if r == 255 and g == 219 and b == 195:\n click(x+x1, y+y1)\n time.sleep(0.02)","repo_name":"RealSoerensen/AimBooster-Bot","sub_path":"aimbooster_bot.py","file_name":"aimbooster_bot.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1241778806","text":"\"\"\"\n@author yl247234 \n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport re, glob, os\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport math\nlabel_size = 18\nmpl.rcParams['xtick.labelsize'] = label_size\nmpl.rcParams['ytick.labelsize'] = label_size \ntext_size = 26\n\n## INPUTS ##\nDIRECTORY_STAP = '/neurospin/brainomics/2016_stap/'\nleft_STAP = 'morpho_S.T.s._left.dat'\nright_STAP = 'morpho_S.T.s._right.dat'\n\ncolumns = ['geodesicDepthMax', 'geodesicDepthMean', 'plisDePassage', 'hullJunctionsLength']\ncolumns_f = ['FID', 'IID']+columns\n\ndf_right = 
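In the AimBooster script above, mss's ScreenShot object exposes per-pixel RGB through .pixel(x, y), which the scan loop compares against the target colour before clicking. A minimal standalone capture, with arbitrary region coordinates:

import mss

with mss.mss() as sct:
    region = {'top': 100, 'left': 100, 'width': 50, 'height': 50}
    shot = sct.grab(region)
    r, g, b = shot.pixel(10, 10)  # coordinates are relative to the region
    print(r, g, b)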
pd.read_csv(DIRECTORY_STAP+'brut_output/'+right_STAP, delim_whitespace=True)\np = [2 if p >0 else 1 for p in df_right['plisDePassage']]\ndf_right['plisDePassage'] = p \ndf_right['FID'] = ['%012d' % int(i) for i in df_right['subject']]\ndf_right['IID'] = ['%012d' % int(i) for i in df_right['subject']]\ndf_right.index = df_right['IID']\ndf_right = df_right[columns_f]\ndf_right= df_right.dropna()\n\ndf_left = pd.read_csv(DIRECTORY_STAP+'brut_output/'+left_STAP, delim_whitespace=True)\np = [2 if p >0 else 1 for p in df_left['plisDePassage']]\ndf_left['plisDePassage'] = p \ndf_left['FID'] = ['%012d' % int(i) for i in df_left['subject']]\ndf_left['IID'] = ['%012d' % int(i) for i in df_left['subject']]\ndf_left.index = df_left['IID']\ndf_left = df_left[columns_f]\ndf_left = df_left.dropna()\n\n\ndf_left0 = df_left.loc[df_right.index]\ndf_left0 = df_left0.dropna()\ndf_right0 = df_right.loc[df_left0.index]\ndf_asym0 = pd.DataFrame()\ndf_asym0['FID'] = df_left0['FID']\ndf_asym0['IID'] = df_left0['IID']\ndf_asym0.index = df_asym0['IID']\ndf_asym0['asymDepthMax'] = 2*(df_right0['geodesicDepthMax']-df_left0['geodesicDepthMax'])/(df_left0['geodesicDepthMax']+df_right0['geodesicDepthMax'])\n\n#### COMPARISON MALE FEMALE ####\ncovar = '/neurospin/brainomics/imagen_central/clean_covar/covar_GenCit5PCA_ICV_MEGHA.cov'\ndf_covar = pd.read_csv(covar, delim_whitespace=True, header=None)\ndf_covar.columns = [u'IID',u'FID', u'C1', u'C2', u'C3', u'C4', u'C5', u'Centres_Berlin',\n u'Centres_Dresden', u'Centres_Dublin', u'Centres_Hamburg',\n u'Centres_London', u'Centres_Mannheim', u'Centres_Nottingham',\n u'SNPSEX', u'ICV'] \ndf_covar['IID'] = ['%012d' % int(i) for i in df_covar['IID']]\ndf_covar.index = df_covar['IID']\nindex_sex = df_covar.index\nindex_female = df_covar['IID'][df_covar['SNPSEX'] == 0]\nindex_male = df_covar['IID'][df_covar['SNPSEX'] == 1]\n\n## COMPARISON RIGHT AND LEFT HANDED ###\ncovar = '/neurospin/brainomics/imagen_central/clean_covar/covar_GenCitHan5PCA_ICV_MEGHA.cov'\ndf_covar = pd.read_csv(covar, delim_whitespace=True, header=None)\ndf_covar.columns = [u'IID',u'FID', u'C1', u'C2', u'C3', u'C4', u'C5', u'Centres_Berlin',\n u'Centres_Dresden', u'Centres_Dublin', u'Centres_Hamburg',\n u'Centres_London', u'Centres_Mannheim', u'Centres_Nottingham',\n u'Handedness', u'SNPSEX', u'ICV'] \ndf_covar.index = df_covar['IID']\ndf_covar['IID'] = ['%012d' % int(i) for i in df_covar['IID']]\nindex_handed = df_covar.index\nindex_left = df_covar['IID'][df_covar['Handedness'] == 1]\nindex_right = df_covar['IID'][df_covar['Handedness'] == 0]\n\ndef customize_bp(ax, bp):\n ## Remove top axes and right axes ticks\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ## change outline color, fill color and linewidth of the boxes\n for box in bp['boxes']:\n # change outline color\n box.set( color='#7570b3', linewidth=2)\n # change fill color\n #box.set( facecolor = '#1b9e77' )\n ## change color and linewidth of the whiskers\n for whisker in bp['whiskers']:\n whisker.set(color='#7570b3', linewidth=2)\n ## change color and linewidth of the caps\n for cap in bp['caps']:\n cap.set(color='#7570b3', linewidth=2)\n ## change color and linewidth of the medians\n for median in bp['medians']:\n median.set(color='red', linewidth=2)\n ## change the style of fliers and their fill\n for flier in bp['fliers']:\n flier.set(marker='o', color='#e7298a', alpha=0.5)\n\ndata_to_plot = [np.asarray(df_right['geodesicDepthMax']), np.asarray(df_left['geodesicDepthMax'])]\n# Create a figure instance\nfig = plt.figure(1, figsize=(9, 
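The asymmetry index built above, and reused in every group comparison that follows, is AI = 2(R - L)/(R + L): the right-left difference normalised by the mean of the two sides. A quick numeric check with invented depths:

import numpy as np

R = np.array([10.0, 12.0])  # right-hemisphere depth max
L = np.array([9.0, 12.0])   # left-hemisphere depth max
AI = 2 * (R - L) / (R + L)
print(AI)  # [0.10526316 0.        ] -- positive means deeper on the right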
6))\n# Create an axes instance\nax = fig.add_subplot(111)\n# Create the boxplot\nbp = ax.boxplot(data_to_plot)\ncustomize_bp(ax, bp)\n## Custom x-axis labels\nax.set_xticklabels(['Right Depth Max', 'Left Depth Max'], fontsize=text_size, fontweight = 'bold')\nax.set_ylabel('Depth Max [mm]',fontsize=text_size, fontweight = 'bold', labelpad=0)\n\n\n\ndata_to_plot2 = [np.asarray(df_asym0['asymDepthMax']), np.asarray(df_asym0['asymDepthMax'].loc[index_male]), np.asarray(df_asym0['asymDepthMax'].loc[index_female]), np.asarray(df_asym0['asymDepthMax'].loc[index_right]), np.asarray(df_asym0['asymDepthMax'].loc[index_left])]\nfig = plt.figure(2, figsize=(9, 6))\n# Create an axes instance\nax = fig.add_subplot(111)\n# Create the boxplot\nbp = plt.boxplot(data_to_plot2)\ncustomize_bp(ax, bp)\nax.set_xticklabels(['All subjects: ' + str(len(df_asym0['asymDepthMax'])), 'Male: ' + str(len(df_asym0['asymDepthMax'].loc[index_male])), 'Female: ' + str(len(df_asym0['asymDepthMax'].loc[index_female])), 'Right hand: ' + str(len(df_asym0['asymDepthMax'].loc[index_right])), 'Left hand: ' + str(len(df_asym0['asymDepthMax'].loc[index_left]))], fontsize=text_size, fontweight = 'bold')\nax.set_ylabel('AI = 2(R-L)/(R+L) ',fontsize=text_size, fontweight = 'bold', labelpad=0)\n\n### ALLOMETRY NORMALIZATION ### \nbrainvisa_icv = '/neurospin/imagen/workspace/cati/morphometry/volumes/BL_morphologist_tissues_volumes.csv'\ndf21 = pd.read_csv(brainvisa_icv, sep=';')\ndf21['IID'] = ['%012d' % int(i) for i in df21['subject'] ]\ndf21.index = df21['IID']\ncovar_path = '/neurospin/brainomics/imagen_central/covar/'\ndf1 = pd.read_csv(covar_path+'aseg_stats_volume_BL.csv')\ndf1 = df1[['Measure:volume', 'EstimatedTotalIntraCranialVol']]\ndf1.columns = ['IID', 'ICV']\ndf1.index = df1['IID']\ndf1 = df1.sort_index(axis=0)\ndf1['IID'] = ['%012d' % int(i) for i in df1['IID']]\ndf1.index = df1['IID']\ndf2 = df21.loc[df1.index]\ndf2 = df2.dropna()\neTIV_Bv = np.asarray(df2['eTIV'])\n\nallometry_coeffs = {}\nx = np.log(eTIV_Bv)\ndf_left_al = df_left.loc[df1.index]\ndf_left_al = df_left_al.dropna()\ny = np.log(df_left_al['geodesicDepthMax'])\np = np.polyfit(x, y, 1)\nallometry_coeffs['left_depthMax'] = p[0]\ndf_left_al['geodesicDepthMax'] = df_left_al['geodesicDepthMax']/np.power(eTIV_Bv,allometry_coeffs['left_depthMax'])\ndf_right_al = df_right.loc[df1.index]\ndf_right_al = df_right_al.dropna()\ny = np.log(df_right_al['geodesicDepthMax'])\np = np.polyfit(x, y, 1)\nallometry_coeffs['right_depthMax'] = p[0]\ndf_right_al['geodesicDepthMax'] = df_right_al['geodesicDepthMax']/np.power(eTIV_Bv,allometry_coeffs['right_depthMax'])\n\n\ndf_left0_al = df_left_al.loc[df_right_al.index]\ndf_left0_al = df_left0_al.dropna()\ndf_right0_al = df_right_al.loc[df_left0_al.index]\ndf_asym0_al = pd.DataFrame()\ndf_asym0_al['FID'] = df_left0_al['FID']\ndf_asym0_al['IID'] = df_left0_al['IID']\ndf_asym0_al.index = df_asym0_al['IID']\ndf_asym0_al['asymDepthMax'] = 2*(df_right0_al['geodesicDepthMax']-df_left0_al['geodesicDepthMax'])/(df_left0_al['geodesicDepthMax']+df_right0_al['geodesicDepthMax'])\n\n### END ALLOMETRY NORMALISATION ####\ndata_to_plot3 = [np.asarray(df_asym0['asymDepthMax'].loc[index_sex]), np.asarray(df_asym0['asymDepthMax'].loc[index_male]), np.asarray(df_asym0['asymDepthMax'].loc[index_female]), np.asarray(df_asym0_al['asymDepthMax'].loc[index_sex]), np.asarray(df_asym0_al['asymDepthMax'].loc[index_male]), np.asarray(df_asym0_al['asymDepthMax'].loc[index_female])]\nfig = plt.figure(3, figsize=(24, 18))\n# Create an axes instance\nax = 
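The allometry block above fits log(depth) against log(eTIV) with np.polyfit and then divides each depth by eTIV raised to the fitted slope, removing the head-size scaling. The same two steps in isolation, on synthetic data where the true exponent is 0.3:

import numpy as np

rng = np.random.default_rng(0)
icv = rng.uniform(1.2e6, 1.8e6, 200)                     # intracranial volumes
depth = 0.05 * icv**0.3 * rng.lognormal(0.0, 0.05, 200)  # depth ~ ICV^0.3

slope, intercept = np.polyfit(np.log(icv), np.log(depth), 1)
normalized = depth / icv**slope  # allometrically rescaled depth
print(round(slope, 2))           # recovers a value close to 0.3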
fig.add_subplot(111)\n# Create the boxplot\nbp = plt.boxplot(data_to_plot3)\ncustomize_bp(ax, bp)\nax.set_xticklabels(['All: ' + str(len(df_asym0['asymDepthMax'].loc[index_sex])), 'Male: ' + str(len(df_asym0['asymDepthMax'].loc[index_male])), 'Female: ' + str(len(df_asym0['asymDepthMax'].loc[index_female])), 'All (allo)', 'Male (allo)', 'Female (allo)'], fontsize=text_size, fontweight = 'bold')\nax.set_ylabel('AI = 2(R-L)/(R+L) ',fontsize=text_size, fontweight = 'bold', labelpad=0)\n\n\n\n\n\n\n\n\n\n\n\n\nplt.show()\n","repo_name":"neurospin/scripts","sub_path":"2016_stap/box_plot.py","file_name":"box_plot.py","file_ext":"py","file_size_in_byte":8224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38157558118","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 10 15:59:12 2019\n\n@author: adamreidsmith\n\"\"\"\n\n'''\nStandard neural network with dropout. Input is the peaks and corresponding \nfrequencies of the Fourier transform of a solution to the Van der Pol equation\nx'' - a*(1-x^2)*x' + b*x = f(t) there f is periodic or multiperiodic with a \nrandom phase that is encorporated into training. The network predicts values \nof the parameters a and b given training data.\n\nDatafiles for this network can be generated by running 'vdp_data.py'.\n'''\n\n#Path of the datafile created by 'vdp_data.py'.\nfile_path = './datafiles/vdp_data_800pts_[soln,FT,hist,phase(True),param].npy'\n\nfrom os import path\nassert path.exists(file_path), 'Datafile not found. Please run \\'vdp_data.py\\' \\\n to generate a datafile.'\n \nimport torch\nimport numpy as np\nfrom torch import nn\nfrom scipy.signal import find_peaks\nfrom torch.utils.data import Dataset, DataLoader, random_split\nimport matplotlib.pyplot as plt\n\n###############################################################################\n'''\nInputs:\n n_epochs: Number of epochs to train for.\n \n batch_size: Batch size.\n \n lr: Learning Rate.\n \n weight_decay: Weight decay factor.\n \n lr_factor: Learning rate decay factor. Learning rate is multiplied \n by this factor every epoch.\n \n loss_function: Loss function. Can be:\n 'mean square': loss = sum((x_i - y_i)^2)\n 'log cosh': loss = sum(log(cosh(x_i - y_i)))\n \n include_phase: Include or exclude the phase in the training of the \n neural network.\n \n num_peaks: Number of peaks in Fourier transforms to include in training.\n \n inputs: Values to use as inputs to the network. This is a list \n of strings each indicating a set of inputs to the \n network. Valid entries are:\n 'refft': Real part of the FFT.\n 'imfft': Imaginary part of the FFT.\n 'absfft': Absolute value of the FFT.\n 'refft_peaks': Real part of peaks in FFT. Peaks \n are computed from absolute value \n of the FFT.\n 'imfft_peaks': Imaginary part of peaks in FFT. 
\n Peaks are computed from absolute \n value of the FFT.\n 'absfft_peaks': Peaks in the absolute value of the FFT.\n 'phase': Phase(s) in the forcing function.\n'''\n###############################################################################\n\ndef main(n_epochs=150,\n batch_size=4,\n lr=0.001,\n lr_factor=0.98,\n weight_decay=1e-8,\n loss_function='log cosh',\n include_phase=True,\n num_peaks=6,\n inputs=['absfft_peaks', 'phase']):\n \n class Data(Dataset):\n \n def __init__(self):\n print('\\nLoading data...')\n self.data = np.load(file_path)\n \n dlen = len(self.data)\n \n #Parameters used in solution of Van der Pol oscillator\n self.parameters = torch.Tensor([self.data[i] for i in range(dlen) if (i+1) % 5 == 0])\n \n #Phase phi included in the forcing function cos(wt+phi)\n self.phase = torch.Tensor([[self.data[i]] for i in range(dlen) if (i+2) % 5 == 0])\n \n #Time points at which the Van dar Pol equation was evaluated.\n self.time = [self.data[i] for i in range(dlen) if i % 5 == 0]\n self.time = [self.time[0][i][0] for i in range(len(self.time[0]))]\n \n #Tensor of complex values of Fourier transform\n self.fft = [self.data[i] for i in range(dlen) if (i+4) % 5 == 0]\n self.fft = [self.fft[i][:,1][:len(self.time)//2+1] for i in range(len(self.fft))]\n \n #Tensor of absolute value of Fourier transform values\n self.absfft = torch.Tensor(np.abs(self.fft))\n \n #Tensor of real part of Fourier transform values\n if 'refft' in inputs:\n self.refft = torch.Tensor(np.real(self.fft))\n \n #Tensor of imaginary part of Fourier transform values\n if 'imfft' in inputs:\n self.imfft = torch.Tensor(np.imag(self.fft))\n \n #Indices of peaks in absolute value of Fourier transform\n self.peaks_indices = [find_peaks(self.absfft[i])[0] for i in range(len(self.absfft))]\n \n #Indices of the top n peaks in each Fourier transform\n self.num_max_peaks = num_peaks #Number of heighest peaks to consider\n self.max_n_peaks_indices = []\n peaks_indices = self.peaks_indices.copy()\n for i in range(len(self.peaks_indices)):\n for j in range(self.num_max_peaks):\n key_func = lambda index: self.absfft[i][index]\n try:\n max_peak_index = max(peaks_indices[i], key=key_func)\n except:\n max_peak_index = None\n if j == 0:\n #appendee = [max_peak_index] if max_peak_index is not None else self.max_n_peaks_indices[-1][0]\n self.max_n_peaks_indices.append([max_peak_index])\n else:\n appendee = max_peak_index if max_peak_index is not None else self.max_n_peaks_indices[-1][0]\n self.max_n_peaks_indices[-1].append(appendee) \n \n index = np.argwhere(peaks_indices[i] == max_peak_index)\n peaks_indices[i] = np.delete(peaks_indices[i], index)\n \n #Values and frequencies of top n peaks in each Fourier transform\n self.max_n_peaks = [[self.fft[i][j] for j in self.max_n_peaks_indices[i]] for i in range(len(self.fft))]\n self.max_n_peaks_time = [[self.time[j].item() for j in self.max_n_peaks_indices[i]] for i in range(len(self.fft))]\n \n self.len = self.parameters.shape[0]\n \n def __getitem__(self, index): \n \n added_time = False\n items_to_cat = []\n for string in inputs:\n if string != 'phase':\n if string == 'refft':\n items_to_cat.append(self.refft[index])\n \n elif string == 'imfft':\n items_to_cat.append(self.imfft[index])\n \n elif string == 'absfft':\n items_to_cat.append(self.absfft[index])\n\n elif string == 'refft_peaks':\n items_to_cat.append(torch.Tensor(np.real(self.max_n_peaks[index])))\n if not added_time:\n items_to_cat.append(torch.Tensor(self.max_n_peaks_time[index]))\n added_time = True\n \n elif string == 
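The loop above extracts the num_peaks tallest maxima by repeatedly taking max() and deleting the winner from the index array. The same selection can be sketched more directly with argsort; this simplification ignores the file's padding behaviour for spectra with too few peaks:

import numpy as np
from scipy.signal import find_peaks

t = np.linspace(0.0, 1.0, 800)
wave = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 35 * t)
spectrum = np.abs(np.fft.rfft(wave))

peaks, _ = find_peaks(spectrum)
top6 = peaks[np.argsort(spectrum[peaks])[::-1][:6]]  # indices of the 6 tallest peaks
print(top6, spectrum[top6])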
'imfft_peaks':\n items_to_cat.append(torch.Tensor(np.imag(self.max_n_peaks[index])))\n if not added_time:\n items_to_cat.append(torch.Tensor(self.max_n_peaks_time[index]))\n added_time = True\n \n elif string == 'absfft_peaks':\n items_to_cat.append(torch.Tensor(np.abs(self.max_n_peaks[index])))\n if not added_time:\n items_to_cat.append(torch.Tensor(self.max_n_peaks_time[index]))\n added_time = True\n else:\n raise RuntimeError('Undefined input string: ' + str(string))\n \n if len(items_to_cat) > 1:\n item = torch.cat(items_to_cat)\n else:\n item = items_to_cat[0]\n \n return [item, self.phase[index]], self.parameters[index]\n \n __len__ = lambda self: self.len\n \n \n dataset = Data()\n \n # Lengths of the training and validation datasets\n train_len = int(0.75*dataset.len)\n valid_len = dataset.len - train_len\n \n #Randomly split the data into training and validation datasets\n train_data, valid_data = random_split(dataset, (train_len, valid_len))\n \n train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(dataset=valid_data, batch_size=batch_size, shuffle=True)\n \n \n #Model of the neural network\n class Model(nn.Module):\n \n def __init__(self):\n super(Model, self).__init__()\n \n n_inputs = self.compute_ninputs(inputs)\n \n #Fully connected linear layers\n #self.dropout1 = nn.Dropout(p=0.1)\n self.fc1 = nn.Linear(in_features=n_inputs, out_features=500)\n self.dropout2 = nn.Dropout(p=0.1)\n self.fc2 = nn.Linear(in_features=500, out_features=50)\n self.dropout3 = nn.Dropout(p=0.1)\n self.fc3 = nn.Linear(in_features=50, out_features=2)\n \n def forward(self, x, phi):\n \n #Append phi to the input vector\n if 'phase' in inputs:\n x = torch.cat((x,phi.flatten(1)),1)\n \n #Linear layers wih dropout\n #x = self.dropout1(x)\n x = torch.sigmoid(self.fc1(x))\n x = self.dropout2(x)\n x = torch.sigmoid(self.fc2(x))\n x = self.dropout3(x) \n return self.fc3(x)\n \n def compute_ninputs(self,inputs):\n added_time = False\n n_inputs = 0\n for string in inputs:\n if string == 'phase':\n n_inputs += len(dataset.phase[0][0])\n \n elif string in ['refft', 'imfft', 'absfft']:\n n_inputs += len(dataset.absfft[0])\n \n elif string in ['refft_peaks', 'imfft_peaks', 'absfft_peaks']:\n n_inputs += dataset.num_max_peaks\n if not added_time:\n n_inputs += dataset.num_max_peaks\n added_time = True\n \n return n_inputs\n\n \n model = Model()\n \n if loss_function == 'mean square':\n loss_func = nn.MSELoss()\n elif loss_function == 'log cosh':\n loss_func = lambda x, y: torch.log(torch.cosh(2*(x - y))).sum()\n else:\n raise RuntimeError('loss_function not recognized. 
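The 'log cosh' option above sums log(cosh(2(x - y))), which is roughly quadratic for small residuals and linear for large ones, so it behaves like mean-square error near the target while being less sensitive to outliers. A tiny numeric check (the factor of 2 inside cosh is this file's scaling choice):

import torch

x = torch.tensor([0.1, 2.0])
y = torch.tensor([0.0, 0.0])
loss = torch.log(torch.cosh(2 * (x - y))).sum()
print(loss.item())  # ~0.020 + ~3.307: small errors count quadratically, large ones almost linearly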
\\\n Set loss_function to \\'mean square\\' or \\'log cosh\\'')\n \n #Optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_factor)\n \n def evaluate():\n #Evaluation mode\n model.eval()\n for data in valid_loader:\n \n #Split batch into inputs and outputs\n x, phi, y = data[0][0], data[0][1], data[1].squeeze()\n \n #Forward propagation\n out = model(x, phi)\n \n #Loss computation\n loss = loss_func(out, y)\n \n #Save training loss in this batch\n valid_loss.append(loss.item())\n \n #Compute the average percent error over a validation batch\n percent_error = 100*torch.div(abs(out - y), y)\n all_percent_error.extend(percent_error.flatten().squeeze(0).tolist())\n \n return valid_loss\n \n def train():\n #Training mode\n model.train()\n for data in train_loader:\n \n #Split batch into inputs and outputs\n x, phi, y = data[0][0], data[0][1], data[1].squeeze()\n \n def closure():\n #Reset gradients to zero\n optimizer.zero_grad()\n \n #Forward propagation\n out = model(x, phi)\n \n #Loss computation\n loss = loss_func(out, y)\n \n #Backpropagation\n loss.backward()\n \n return loss\n \n #Weight optimiation\n optimizer.step(closure)\n \n #Save training loss in this batch\n train_loss.append(closure().item())\n \n return train_loss\n \n def plot_hist():\n #Plot histograms of the error (Predicted - True) in the predicted data\n error = [] \n model.eval()\n for data in valid_loader:\n #Split batch into inputs and outputs\n x, phi, y = data[0][0], data[0][1], data[1].squeeze()\n \n out = model(x, phi)\n error.append((out - y).detach().numpy())\n \n error = np.array(error)\n a_error = np.array([error[i][j][0] for i in range(len(error)) for j in range(batch_size)])\n b_error = np.array([error[i][j][1] for i in range(len(error)) for j in range(batch_size)])\n \n plt.figure(figsize=(8,6))\n plt.hist(a_error, bins=30, color='b')\n plt.title('Prediction error in parameter \\'a\\' in validation data')\n plt.xlabel('Predicted - True')\n plt.figure(figsize=(8,6))\n plt.hist(b_error, bins=30, color='k')\n plt.title('Prediction error in parameter \\'b\\' in validation data')\n plt.xlabel('Predicted - True')\n \n plt.figure(figsize=(8,6))\n p_err_less_100 = [i for i in all_percent_error if i <= 100]\n n_more_100 = len(all_percent_error) - len(p_err_less_100) \n plt.hist(p_err_less_100, bins=30)\n plt.text(x=plt.xlim()[1]-35, y=plt.ylim()[1]-20, s='More than 100% error:\\n'+str(n_more_100))\n plt.xlabel('Percent Error')\n plt.title('Histogram of percent errors in predictions of validation data')\n \n plt.show()\n \n #Print statistics about the current run\n print('\\nModel Information:\\n', model, sep='')\n print('\\nRun Start', \n '\\n Batch size:', batch_size, \n '\\n Epochs:', n_epochs,\n '\\n Number of peaks:', dataset.num_max_peaks,\n '\\n Training data size:', len(train_loader)*batch_size,\n '\\n Validation data size:', len(valid_loader)*batch_size,\n '\\n Learning rate:', lr,\n '\\n LR decay factor:', lr_factor,\n '\\n Weight decay:', weight_decay,\n '\\n Loss function:', loss_function,\n '\\n Optimizer:', repr(optimizer).partition('(')[0],\n '\\n LR scheduler:', repr(scheduler)[repr(scheduler).find('er.')+3:repr(scheduler).find(' obj')],\n '\\n')\n \n #Training and evaluation loop\n for epoch in range(n_epochs): #An epoch is a run of the entire training dataset\n \n train_loss, valid_loss, all_percent_error = [], [], []\n \n #Train the network\n train_loss = train()\n\n #Evaluate the 
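ExponentialLR above multiplies the learning rate by lr_factor after every epoch, so with the defaults (lr=0.001, lr_factor=0.98) the rate decays to about 4.8e-5 over 150 epochs. A quick check of that schedule in isolation (the dummy parameter exists only so the optimizer has something to hold):

import torch

p = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.Adam([p], lr=0.001)
sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.98)
for _ in range(150):
    opt.step()    # no gradients in this toy loop; a real step follows backward()
    sched.step()
print(opt.param_groups[0]['lr'])  # ~4.83e-05 == 0.001 * 0.98**150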
network\n valid_loss = evaluate()\n \n if (epoch+1) % 5 == 0:\n print('Epoch:', epoch+1,\n '\\n Learning rate: ', scheduler.get_lr()[0],\n '\\n Mean epoch training loss: ', np.mean(train_loss),\n '\\n Mean epoch validation loss:', np.mean(valid_loss),\n '\\n Overfitting factor: ', np.mean(valid_loss)/np.mean(train_loss),\n '\\n Median percent error: ', np.median(np.array(all_percent_error)), '%')\n \n #Update the learing rate\n scheduler.step()\n \n plot_hist()\n\nmain()\n\n\n\n\n\n","repo_name":"adamreidsmith/manifold-data-in-nns","sub_path":"models/nn_ft.py","file_name":"nn_ft.py","file_ext":"py","file_size_in_byte":16452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34132989761","text":"# source /media/csabi/Samsung_T7/VirtualEnvs/Warmane/bin/activate\n\n\nimport unittest\nimport os\n# import boto3\nimport json\nimport requests\nimport random\n# import pickle\nfrom independent_functions.googleauthenticator import get_totp_token as get_mfa\nfrom independent_functions.get_proxies import get_proxies\nfrom independent_functions.wait_between import wait_between\nfrom independent_functions.human_like_mouse_move import human_like_mouse_move\nfrom time import sleep\nfrom pydub import AudioSegment\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import ElementClickInterceptedException\nfrom selenium.common.exceptions import TimeoutException\nfrom urllib3.exceptions import MaxRetryError\n\n\n# Randomization Related\nMIN_RAND = 0.64\nMAX_RAND = 1.27\nLONG_MIN_RAND = 4.78\nLONG_MAX_RAND = 11.1\n\n\nclass Warmane(unittest.TestCase):\n\n previous_response = \"\"\n default_page_proxy = 0\n wittoken = os.environ.get(\"wittoken\")\n warmane_acc = os.environ.get(\"warmane_acc\")\n warmane_pass = os.environ.get(\"warmane_pass\")\n access_token = os.environ.get(\"ACCESS_TOKEN\")\n psid = os.environ.get(\"FB_ID\")\n ack = os.environ.get(\"ACK\")\n sck = os.environ.get(\"SCK\")\n MFA = os.environ.get(\"mfa\")\n fb_api_url = 'https://graph.facebook.com/v8.0/me/'\n filename = 'test.mp3'\n startpage = 'https://www.warmane.com/account'\n log_list = []\n proxy = 0\n cookies = \"cookies.txt\"\n cookie_worked = False\n # s3 = boto3.resource(\n # 's3',\n # aws_access_key_id=ack,\n # aws_secret_access_key=sck\n # )\n # obj = s3.Object(\n # 'bucket-for-cookies',\n # 'cookies.txt'\n # )\n stop_s3 = False\n headless = True\n options = None\n profile = None\n capabilities = None\n captcha_retries = 15\n\n # Setup options for webdriver\n def setUpOptions(self):\n self.options = webdriver.FirefoxOptions()\n self.options.headless = self.headless\n self.options.binary_location = os.environ.get(\"FIREFOX_BIN\")\n\n # Enable Marionette, An automation driver for Mozilla's Gecko engine\n def setUpCapabilities(self):\n self.capabilities = webdriver.DesiredCapabilities.FIREFOX\n self.capabilities['marionette'] = True\n\n # Setup settings\n def setUp(self, proxy=default_page_proxy):\n self.setUpOptions()\n self.setUpCapabilities()\n if proxy == 0:\n pass\n else:\n self.stop_s3 = True\n\n # Splitting the proxy because the port\n # needs to be an integer\n try:\n proxy, proxy_port = proxy.split(\":\")\n except AttributeError:\n self.test_run()\n\n self.options.set_preference(\n 'network.proxy.type', 1\n ) # int\n self.options.set_preference(\n 'network.proxy.socks', proxy\n ) # 
string\n self.options.set_preference(\n 'network.proxy.socks_port', int(proxy_port)\n ) # int\n self.options.set_preference(\n 'network.proxy.socks_version', 4\n ) # int\n setting_up = True\n\n while setting_up is True:\n try:\n self.driver = webdriver.Firefox(\n options=self.options,\n capabilities=self.capabilities,\n firefox_profile=self.profile,\n executable_path=\"./geckodriver\"\n )\n setting_up = False\n except Exception as e:\n print(e)\n try:\n self.driver.quit()\n except Exception:\n pass\n print(\"Driver unexpectedly closed, retrying....\")\n\n self.driver.set_page_load_timeout(15)\n\n # def save_cookies(self):\n # self.obj.delete()\n\n # with open(self.cookies, \"wb\") as f:\n # pickle.dump(self.driver.get_cookies(), f)\n\n # with open('cookies.txt', 'rb') as data:\n # self.obj.upload_fileobj(data)\n\n # def load_cookies(self):\n\n # if self.stop_s3 is False:\n # with open('cookies.txt', 'wb') as data:\n # self.obj.download_fileobj(data)\n\n # print(\"Got cookies from S3\")\n # else:\n # pass\n # try:\n # with open(self.cookies, \"rb\") as f:\n # cookies = pickle.load(f)\n # self.driver.delete_all_cookies()\n # have to be on a page before you can\n # add any cookies, any page - does not matter which\n # for cookie in cookies:\n # Checks if the instance expiry a float\n # if isinstance(cookie.get('expiry'), float):\n # it converts expiry cookie to an int\n # cookie['expiry'] = int(cookie['expiry'])\n # self.driver.add_cookie(cookie)\n # except Exception:\n # print(\"No cookies found\")\n\n def send_text_message(self, message):\n\n message = ('\\n'.join(map(str, message)))\n\n headers = {\n 'Content-Type': 'application/json'\n }\n\n data = {\n 'messaging_type': 'RESPONSE',\n 'recipient': {'id': self.psid},\n 'message': {'text': message}\n }\n\n params = {'access_token': self.access_token}\n api_url = self.fb_api_url + 'messages'\n response = requests.post(\n api_url,\n headers=headers,\n params=params,\n data=json.dumps(data)\n )\n\n print(response.content)\n print(data)\n\n def audioToText(self, wavaudiofilename):\n\n url = 'https://api.wit.ai/speech'\n headers = {\n 'Authorization': f'Bearer {self.wittoken}',\n 'Content-Type': 'audio/wav',\n }\n\n params = (\n ('v', '20200513'),\n )\n\n with open(wavaudiofilename, 'rb') as e:\n data = e.read()\n response = requests.post(\n url,\n headers=headers,\n params=params,\n data=data\n )\n\n data = response.json()\n\n return data[\"text\"]\n\n def saveFile(self, content):\n with open(self.filename, \"wb\") as handle:\n for data in content.iter_content():\n handle.write(data)\n\n def something_went_wrong(self):\n print(\"!!!Something went wrong!!!\")\n self.log_list = [\n (\"Unsuccessful try, \"\n \"please try to run \"\n \"the script chmod +x\"\n \" geckodriver && \"\n \"python collect.py\"\n \" manually on\"\n \" https://dashboard.heroku.com/\"\n \"apps/warmane-app .\")]\n self.send_text_message(self.log_list)\n self.driver.quit()\n os._exit(os.EX_OK)\n\n def recursive_retry(self):\n self.captcha_retries -= 1\n print(\n str(self.captcha_retries) +\n \" retries left\"\n )\n\n self.driver.close()\n self.driver.quit()\n proxy = get_proxies()\n self.setUp(proxy)\n\n self.test_run()\n\n def captcha(self, n):\n\n try:\n self.driver.get(self.startpage)\n try:\n # self.load_cookies()\n sleep(2)\n self.driver.find_element_by_class_name(\"navigation-logo\")\n self.driver.refresh()\n except Exception as e:\n print(e)\n self.driver.quit()\n print(f\"{n} retries left\")\n\n if n == 0 or n < 0:\n os._exit(os.EX_OK)\n\n else:\n\n proxy = 
get_proxies()\n self.setUp(proxy)\n self.captcha(n-1)\n try:\n self.driver.find_element_by_id(\"userID\")\n print(\"Cookies are no longer working for this website\")\n except NoSuchElementException:\n print(\"Cookies were loaded up successfully\")\n self.cookie_worked = True\n sleep(2)\n\n if self.cookie_worked is not True:\n\n print(\"Opened the startpage, \"\n \"checking the iframes for recaptcha\")\n\n self.driver.implicitly_wait(10)\n outeriframe = self.driver.find_element_by_tag_name('iframe')\n outeriframe.click()\n\n allIframesLen = self.driver.find_elements_by_tag_name('iframe')\n audioBtnFound = False\n audioBtnIndex = -1\n\n for index in range(len(allIframesLen)):\n self.driver.switch_to.default_content()\n iframe = self.driver.find_elements_by_tag_name(\n 'iframe'\n )[index]\n self.driver.switch_to.frame(iframe)\n self.driver.implicitly_wait(10)\n try:\n audioBtn = self.driver.find_element_by_id(\n 'recaptcha-audio-button'\n ) or \\\n self.driver.find_element_by_id(\n 'recaptcha-anchor'\n )\n action = ActionChains(self.driver)\n human_like_mouse_move(action, audioBtn)\n audioBtn.click()\n\n audioBtnFound = True\n audioBtnIndex = index\n break\n except Exception:\n pass\n\n if audioBtnFound:\n try:\n while audioBtnFound:\n href = self.driver.find_element_by_id(\n 'audio-source'\n ).get_attribute(\n 'src'\n )\n response = requests.get(href, stream=True)\n if self.previous_response == response:\n pass\n else:\n href = self.driver.find_element_by_id(\n 'audio-source'\n ).get_attribute(\n 'src'\n )\n response = requests.get(href, stream=True)\n print(\"Check audio button\")\n self.saveFile(response)\n\n print(\"Converting the mp3 audiofile to wav\")\n sound = AudioSegment.from_mp3(\"test.mp3\")\n sound = sound.export(\"test.wav\", format='wav')\n sound.close()\n\n # os.getcwd() + '/' + \"test.wav\")\n response = self.audioToText(\"test.wav\")\n\n print(\"Text from the response was: \" +\n response)\n print(\"Sending the text \"\n \"result back to captcha\")\n\n self.driver.switch_to.default_content()\n iframe = self.driver.find_elements_by_tag_name(\n 'iframe'\n )[audioBtnIndex]\n self.driver.switch_to.frame(iframe)\n\n try:\n if self.previous_response == response:\n print(\"Recaptcha solved\")\n audioBtnFound = False\n else:\n inputbtn = \\\n self.driver.find_element_by_id(\n 'audio-response'\n )\n\n inputbtn.send_keys(response)\n\n inputbtn.send_keys(Keys.ENTER)\n\n sleep(random.randint(3, 5))\n errorMsg = \\\n self.driver.\\\n find_elements_by_class_name(\n 'rc-audiochallenge'\n '-error-message'\n )[0]\n\n if errorMsg.text == \"\" or \\\n errorMsg.\\\n value_of_css_property('display') \\\n == 'none':\n\n print(\"Recaptcha solved\")\n audioBtnFound = False\n else:\n try:\n print(\"Captcha's response: \" +\n errorMsg.text)\n self.previous_response = response\n except Exception:\n print(\n \"Captcha's response: \" +\n errorMsg.value_of_css_property(\n 'display')\n )\n self.previous_response = response\n except Exception:\n print(\"Recaptcha solved\")\n audioBtnFound = False\n\n except Exception as e:\n print(e)\n print('Recaptcha temporarily banned your IP')\n try:\n self.driver.quit()\n except Exception as e:\n print(e)\n print(\"Driver Closed\")\n print(f\"{str(n)} retries left\")\n if n == 0 or n < 0:\n print(\"Unsuccessful tries\")\n os._exit(os.EX_OK)\n\n elif n == 3:\n self.captcha(n-1)\n\n else:\n proxy = get_proxies()\n self.setUp(proxy)\n self.captcha(n-1)\n else:\n print('Button not found.')\n # self.send_text_message(log_list)\n self.driver.quit()\n print(f\"{n} retries 
left\")\n\n if n == 0 or n < 0:\n print(\"Unsuccessful tries\")\n os._exit(os.EX_OK)\n\n elif n == 3:\n self.captcha(n-1)\n\n else:\n\n proxy = get_proxies()\n self.setUp(proxy)\n self.captcha(n-1)\n\n except Exception as e:\n print(e)\n try:\n self.driver.close()\n self.driver.quit()\n print(f\"{str(n)} retries left\")\n except Exception as e:\n print(e)\n\n if n == 0 or n < 0:\n print(\"Unsuccessful tries\")\n self.send_text_message(\n [(\"Unsuccessful try, \"\n \"please try to run \"\n \"the script chmod +x\"\n \" geckodriver && \"\n \"python collect.py\"\n \" manually on\"\n \" https://dashboard.heroku.com/\"\n \"apps/warmane-app .\")])\n sleep(15)\n os._exit(os.EX_OK)\n\n elif n == 3:\n self.captcha(n-1)\n\n else:\n\n proxy = get_proxies()\n self.setUp(proxy)\n self.captcha(n-1)\n\n # Main function\n def test_run(self):\n self.captcha(self.captcha_retries)\n\n try:\n if self.captcha_retries <= 0:\n raise Exception\n\n except Exception:\n self.something_went_wrong()\n\n if self.cookie_worked is True:\n try:\n self.driver.find_element_by_id(\n \"authCode\"\n ).send_keys(f\"{get_mfa(self.MFA)}\")\n\n self.driver.find_element_by_class_name(\"wm-ui-btn\").click()\n print(\"Passed MFA successfully.\")\n except NoSuchElementException:\n print(\"MFA wasn't requested\")\n pass\n else:\n try:\n self.driver.switch_to.default_content()\n self.driver.find_element_by_id(\n \"userID\"\n ).send_keys(self.warmane_acc)\n self.driver.find_element_by_id(\n \"userPW\"\n ).send_keys(self.warmane_pass)\n self.driver.find_element_by_xpath(\n \"//button[@type='submit']\"\n ).click()\n\n except ElementClickInterceptedException or \\\n TimeoutException or \\\n Exception as e:\n print(e)\n print(\n \"Click/Timeout interception\"\n \" happened retrying captcha.\"\n )\n self.recursive_retry()\n\n except Exception as e:\n print(\n \"The following random\"\n f\" exception has happened:{e}\")\n self.recursive_retry()\n\n print(\n \"Added UserID and Password and clicked on login\"\n )\n try:\n self.driver.implicitly_wait(10)\n except MaxRetryError as e:\n print(e)\n\n ##############################\n try:\n self.driver.find_element_by_id(\n \"authCode\"\n ).send_keys(f\"{get_mfa(self.MFA)}\")\n\n self.driver.find_element_by_class_name(\n \"wm-ui-btn\"\n ).click()\n print(\"Passed MFA successfully.\")\n\n except TimeoutException:\n self.recursive_retry()\n\n except NoSuchElementException:\n print(\"MFA wasn't requested\")\n pass\n try:\n self.driver.implicitly_wait(10)\n except MaxRetryError as e:\n print(e)\n\n try:\n points_before = self.driver.find_element_by_class_name(\"myPoints\")\n points_before = points_before.text\n self.driver.find_element_by_link_text(\"Collect points\").click()\n self.driver.refresh()\n self.driver.implicitly_wait(10)\n current_points = self.driver.find_element_by_class_name(\"myPoints\")\n if (points_before) == (current_points.text):\n self.log_list.append(\n \"You have not logged in-game today\"\n )\n self.log_list.append(\n f\"Your current points are: {current_points.text}\"\n )\n self.log_list.append(\"------------------\")\n else:\n print(\"Daily points collected successfully\")\n self.log_list.append(\"Daily points collected successfully\")\n self.log_list.append(\n f\"Your current points are: {current_points.text}\"\n )\n self.log_list.append(\"------------------\")\n try:\n self.driver.quit()\n except Exception:\n pass\n # self.save_cookies()\n # print(\"Cookies were saved\")\n\n except TimeoutException:\n self.recursive_retry()\n\n except NoSuchElementException:\n print(\"Daily 
points were already collected\")\n self.log_list.append(\"Daily points were already collected\")\n try:\n current_points = \\\n self.driver.find_element_by_class_name(\"myPoints\")\n except NoSuchElementException or \\\n TimeoutException:\n self.log_list = []\n self.recursive_retry()\n\n self.log_list.append(\n f\"Your current points are: {current_points.text}\"\n )\n self.log_list.append(\"------------------\")\n # self.save_cookies()\n # print(\"Cookies were saved\")\n try:\n self.driver.quit()\n except Exception:\n pass\n\n except Exception:\n self.something_went_wrong()\n\n print(\"Successful script run\")\n self.log_list.append(\"Successful script run\")\n self.send_text_message(self.log_list)\n\n try:\n self.driver.close()\n self.driver.quit()\n except Exception as e:\n print(e)\n\n def tearDown(self):\n wait_between(10.13, 15.05)\n\n\nif __name__ == \"__main__\":\n response = (unittest.main(exit=False).result.errors)\n\n if len(response) != 0:\n Warmane().send_text_message(response)\n try:\n Warmane().driver.quit()\n except Exception as e:\n print(e)\n else:\n pass\n","repo_name":"csabca83/warmane_cicd","sub_path":"collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":21980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9912859307","text":"import cv2\nimport numpy as np\nimg = cv2.imread(\"girilmez.jpg\")\nimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nret,thrash = cv2.threshold(img,180,255,cv2.THRESH_BINARY)\ncontours, _ = cv2.findContours(thrash,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\nfor contour in contours:\n approax = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)\n cv2.drawContours(img,[approax], 0 ,[0,0,0], 1)\n x = approax.ravel() [0]\n y = approax.ravel() [1]\n if len(approax) == 3:\n cv2.putText(img, \"ucgen\", (x, y + 60), cv2.FONT_HERSHEY_COMPLEX, (0.5), (0, 0, 0))\n if len(approax) == 4:\n cv2.putText(img, \"dortgen\", (x, y + 60), cv2.FONT_HERSHEY_COMPLEX, (0.5), (0, 0, 0))\n if len(approax) == 5:\n cv2.putText(img,\"besgen\",(x,y+60),cv2.FONT_HERSHEY_COMPLEX,(0.5),(0,0,0))\n if len(approax) == 6:\n cv2.putText(img, \"altıgen\", (x, y + 60), cv2.FONT_HERSHEY_COMPLEX, (0.5), (0, 0, 0))\n if len(approax) > 8:\n cv2.putText(img,\"daire\",(x,y+60),cv2.FONT_HERSHEY_COMPLEX,(0.5),(0,0,0))\n\ncv2.imshow(\"geometric\",img)\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"Nurullah1313/GitHub","sub_path":"python _geometric_shape_dedection/geo-shape-ded.py","file_name":"geo-shape-ded.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72648086025","text":"from api.src.data_processing.data_generator import DataGenerator\nfrom PIL import Image\n\nimport os\nimport numpy as np\n\n\ndef test_get_images_blank():\n data_gen = DataGenerator(os.path.join('data', 'bland'), 1, 64, False)\n assert data_gen.get_images() == []\n\n\ndef test_get_images():\n true_values = ['data{0}letters{0}a{0}augmented.jpg'.format(os.path.sep), 'data{0}letters{0}a{0}orig.jpg'.format(os.path.sep),\n 'data{0}letters{0}b{0}augmented.jpg'.format(os.path.sep), 'data{0}letters{0}b{0}orig.jpg'.format(os.path.sep)]\n data_letters = os.path.join('data', 'letters')\n data_gen = DataGenerator(data_letters, 1, 64, False)\n assert all(img in true_values for img in data_gen.get_images()) and len(\n data_gen.get_images()) == len(true_values)\n\n\ndef test_get_class_from_filename():\n data_gen = 
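A note on the collect.py record that ends above: its audio-challenge loop downloads the reCAPTCHA mp3, converts it to wav, and passes it to an audioToText helper whose body is not shown in this slice. A minimal sketch of how that step is typically built, assuming the pydub and SpeechRecognition packages; the function and file names here are illustrative, not the record's:

import requests
import speech_recognition as sr
from pydub import AudioSegment  # needs ffmpeg on the PATH for mp3 input

def transcribe_challenge(mp3_url):
    # Download the challenge audio to disk (mp3_url is a placeholder).
    with open("challenge.mp3", "wb") as f:
        f.write(requests.get(mp3_url, timeout=30).content)
    # The recognizer expects PCM audio, so convert mp3 -> wav first.
    AudioSegment.from_mp3("challenge.mp3").export("challenge.wav", format="wav")
    recognizer = sr.Recognizer()
    with sr.AudioFile("challenge.wav") as source:
        audio = recognizer.record(source)
    # Free Google Web Speech API; raises sr.UnknownValueError on failure.
    return recognizer.recognize_google(audio)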
DataGenerator(os.path.join('data', 'letters'), 1, 64, False)\n    paths = data_gen.get_images()\n    true_classes = sorted(['a', 'a', 'b', 'b'])\n    tested = sorted([data_gen.get_class_from_path(p) for p in paths])\n    assert all(x == y for x, y in zip(true_classes, tested))\n\n\ndef test_prepare_image():\n    data_gen = DataGenerator(None, 32, 64, False)\n    img_w = img_h = 64\n    bias = np.random.rand(img_w, img_h, 1) * 64\n    variance = np.random.rand(img_w, img_h, 1) * (255 - 64)\n    imarray = np.random.rand(img_w, img_h, 3) * variance + bias\n    im = Image.fromarray(imarray.astype('uint8')).convert('RGB')\n    im = np.asarray(im)\n    features = data_gen.prepare_image(im)\n    assert len(features) == 2592\n    assert len(features.shape) == 1\n    assert features.shape == (2592,)\n    assert all(0.0 < val < 1.0 for val in features)\n","repo_name":"kacperkan/gesture-classifier-training-framework","sub_path":"tests/test_data_generator.py","file_name":"test_data_generator.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"9672392054","text":"'''\n    SEIR/Optuna model simulation\n\n    2020-03-28\n'''\n\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom scipy.integrate import odeint\nfrom scipy.optimize import minimize\nimport matplotlib.pylab as plt\nfrom matplotlib.pylab import rcParams\nimport optuna\nplt.style.use('seaborn-colorblind')\nimport math\nimport csv\nimport datetime\n\nread_csv = './data/Italy2.csv'\n#filename = './data/seir_optuna.res'\n\ncsvRow = []\ndata = {}\n\nwith open(read_csv) as f:\n    reader = csv.reader(f)\n    for row in reader:\n        csvRow.append(row)\n\ndel csvRow[:2]\ndel csvRow[-1]\n\nfor lin in csvRow:\n    date = lin[0].split('-')\n    date_str = date[0]+date[1]+date[2]\n    cases = int(lin[1])\n    deaths = int(lin[2])\n\n    data[date_str]=[date,cases,deaths]\n\ndays = list(data.keys())\nndays = len(days)\n\nX=[]\n\nfor d in range(ndays-1):\n    day0 = days[d]\n    day1 = days[d+1]\n    dx = data[day1][1]-data[day0][1]\n    X.append(dx)\n\nN = 60480000 # community size\nt_max = len(X) \ntspan = np.linspace(0.0, t_max, t_max + 1)\n\n# parameters to fit\nr0 = 0.0 #Basic reproduction number (1.5 - 3.0)\nbeta = 0.0 #product of the people exposed to each day by infected people\nsigma = 1/7 # incubation rate average 7.0 days\ngamma = 0.154 # 1/gamma = average rate of recovery or death\nI0 = 20 # Init Infected patients\n\n# Data to be fitted\ndI_observed = X\n\ndef seir(v,t):\n    global r0, beta, sigma, gamma\n    # v = [S, E, I, R]\n    x = beta*v[0]*v[2]/N # infected rate of the day\n    dS = -x # Susceptible\n    dE = x - sigma * v[1] # Exposed \n    dI = sigma * v[1] - gamma * v[2] #Infected \n    dR = gamma * v[2] # Removed\n    return np.array([dS, dE, dI, dR])\n\ndef objective(trial):\n    global r0, beta, I0\n    # define the parameter space\n    r0 = trial.suggest_uniform('r0', 1.5, 3.0) #Reproduction number\n    beta = trial.suggest_loguniform('beta', 0.1, 1.0) # infected rate by day\n    #I0 = trial.suggest_uniform('I0', 0, 10000 ) # initial infected people\n    ini_state = [N-I0, I0, 0, 0] # Initial state\n    ode_int = odeint(seir, ini_state, tspan) # compute S, I, R from day 0 through day tspan\n    x = (ode_int[:,1])[1:] # new cases from day 1 through day tspan\n    return mean_squared_error(x, dI_observed) # squared error against the observed data\n\noptuna.logging.disable_default_handler()\nstudy = optuna.create_study()\nstudy.optimize(objective, n_trials=100)\nprint(\"best_value = \", study.best_value)\nprint(\"best_params = \", 
study.best_params)\n\n\n","repo_name":"IchiroYoshida/python_public","sub_path":"covid/calc/italy/seir_optuna.py","file_name":"seir_optuna.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"937515595","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param A : head node of linked list\n # @return the head node in the linked list\n def insertionSortList(self, A):\n a = []\n cur = A\n while cur:\n a.append(cur.val)\n cur = cur.next\n a.sort()\n cur = A\n for e in a:\n cur.val = e\n cur = cur.next\n return A","repo_name":"thong3le/InterviewBit","sub_path":"linked_lists/insertion_sort_list.py","file_name":"insertion_sort_list.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"33776815017","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport re\nimport sqlite3\n\nfrom flask import Flask, render_template, g, jsonify\n\nfrom util.spell import candidates\n\napp = Flask(__name__)\n\nDATABASE = app.root_path + '/db/sozlik.db'\n\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n db.row_factory = sqlite3.Row\n return db\n\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()\n\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n\n# /api/suggestion/\n@app.route(\"/api/suggestion/\")\ndef api_get_suggestion(beginswith):\n beginswith = normalize_query(beginswith)\n cur = get_db().cursor()\n cur.execute(\"select * from dictionary where word like ? limit 10\", (beginswith + '%',))\n result = cur.fetchall()\n data = []\n if result:\n for r in result:\n data.append({\n 'word': r['word'],\n 'type': r['type'],\n 'raw_word': r['raw_word'],\n 'id': r['id']\n })\n return jsonify(suggestions=data)\n\n\n# /translate//\n@app.route(\"/translate/\", defaults={'dictionary_type': ''})\n@app.route(\"/translate//\")\ndef get_translate(dictionary_type, search_word):\n search_word = normalize_query(search_word)\n dictionary_id = None\n if dictionary_type == 'qqen':\n dictionary_id = 1\n elif dictionary_type == 'ruqq':\n dictionary_id = 2\n cur = get_db().cursor()\n if dictionary_id:\n cur.execute(\"select * from dictionary where word = ? 
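For reference, the seir() function in the record above integrates the standard SEIR compartment model; written out, with N the population size and beta, sigma, gamma the transmission, incubation, and removal rates, the system it encodes is:

\[
\frac{dS}{dt} = -\frac{\beta S I}{N}, \qquad
\frac{dE}{dt} = \frac{\beta S I}{N} - \sigma E, \qquad
\frac{dI}{dt} = \sigma E - \gamma I, \qquad
\frac{dR}{dt} = \gamma I
\]

The Optuna objective then fits beta (and suggests r0, although r0 is never actually read inside seir()) by minimizing the mean squared error between the integrated curve and the observed daily increments.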
AND type = ?\", (search_word, dictionary_id))\n else:\n cur.execute(\"select * from dictionary where word = ?\", (search_word,))\n result = cur.fetchone()\n if result and result[\"type\"] == 1:\n return render_template(\"translate.html\", img_src=\"/static/images/qqen.png\", word=result[\"raw_word\"], translation=result[\"translation\"])\n elif result and result[\"type\"] == 2:\n return render_template(\"translate.html\", img_src=\"/static/images/ruqq.png\", word=result[\"raw_word\"], translation=result[\"translation\"])\n else:\n did_you_mean = candidates(search_word, get_all_words())\n return render_template(\"notfound.html\", word=search_word, did_you_mean=did_you_mean)\n\n\ndef normalize_query(search_word):\n return re.sub(u'[^a-záúıóǵńA-ZÁÚÍÓǴŃа-яёәүқөғңА-ЯЁӘҮҚӨҒҢ\\-]', '', search_word.lower())\n\n\ndef get_all_words():\n data = []\n cur = get_db().cursor()\n cur.execute(\"select word from dictionary\")\n result = cur.fetchall()\n if result:\n for r in result:\n data.append(r[\"word\"])\n return data\n","repo_name":"shagalalab/sozlik-web","sub_path":"sozik.py","file_name":"sozik.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"26619795634","text":"from datetime import date\r\natual = date.today().year\r\nsexo = str(input('Você é homem ou mulher ?')).strip().lower()\r\nif sexo == 'homem':\r\n nasc = int(input('Ano de nascimento : '))\r\n idade = atual - nasc\r\n print('Quem nasceu em {} tem {} anos em {}.'.format(nasc, idade, atual))\r\n if idade == 18:\r\n print('Você tem que se alistar \\033[31mIMEDIATAMENTE\\033[m')\r\n elif idade < 18:\r\n saldo = 18 - idade\r\n ano = atual + saldo\r\n print('Você ainda não tem 18 anos. Ainda faltam {} anos para o alistamento'.format(saldo))\r\n print('Seu alistamento será em {}'.format(ano))\r\n elif idade > 18:\r\n saldo = idade - 18\r\n ano = atual - saldo\r\n print('Você já deveria ter se alistado há {} anos. '.format(saldo))\r\n print('Seu alistamento foi em {}'.format(ano))\r\nelse:\r\n print('Você não precisa se alistar ! 
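The suggestion endpoint in the sozik.py record above relies on a parameterized LIKE query; the same idiom in isolation, with the wildcard kept in the bound value rather than spliced into the SQL text (table and column names as in the record, the rest illustrative):

import sqlite3

def suggest(db, prefix, limit=10):
    # Placeholders keep user input out of the SQL string itself; only
    # the bound parameter carries the '%' prefix wildcard.
    cur = db.execute(
        "select word from dictionary where word like ? limit ?",
        (prefix + "%", limit),
    )
    return [row[0] for row in cur.fetchall()]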
Você é uma mulher !')","repo_name":"KaueGuimaraes/Python-Begin-Exercises","sub_path":"ex039(nao_revisado).py","file_name":"ex039(nao_revisado).py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"6988815779","text":"import os\nimport re\nimport sys\nimport json\nimport math\nimport time\nimport unicodedata\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import function\nfrom tqdm import tqdm\nfrom functools import partial\n\ndef shape_list(x):\n    \"\"\"\n    deal with dynamic shape in tensorflow cleanly\n    \"\"\"\n    ps = x.get_shape().as_list()\n    ts = tf.shape(x)\n    return [ts[i] if ps[i] is None else ps[i] for i in range(len(ps))]\n\nclass ResultLogger(object):\n    def __init__(self, path, *args, **kwargs):\n        if 'time' not in kwargs:\n            kwargs['time'] = time.time()\n        self.f_log = open(make_path(path), 'w')\n        self.f_log.write(json.dumps(kwargs)+'\\n')\n\n    def log(self, **kwargs):\n        if 'time' not in kwargs:\n            kwargs['time'] = time.time()\n        self.f_log.write(json.dumps(kwargs)+'\\n')\n        self.f_log.flush()\n\n    def close(self):\n        self.f_log.close()\n\ndef find_trainable_variables(key):\n    return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \".*{}.*\".format(key))\n\ndef iter_data(*datas, n_batch=128, truncate=False, verbose=False, max_batches=float(\"inf\")):\n    n = len(datas[0])\n    if truncate:\n        n = (n//n_batch)*n_batch\n    n = min(n, max_batches*n_batch)\n    n_batches = 0\n    if verbose:\n        f = sys.stderr\n    else:\n        f = open(os.devnull, 'w')\n    for i in tqdm(range(0, n, n_batch), total=n//n_batch, file=f, ncols=80, leave=False):\n        if n_batches >= max_batches: return  # PEP 479: raising StopIteration in a generator is a RuntimeError on 3.7+\n        if len(datas) == 1:\n            yield datas[0][i:i+n_batch]\n        else:\n            yield (d[i:i+n_batch] for d in datas)\n        n_batches += 1\n\n@function.Defun(\n    python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),\n    shape_func=lambda op: [op.inputs[0].get_shape()])\ndef convert_gradient_to_tensor(x):\n    \"\"\"force gradient to be a dense tensor\n    it's often faster to do dense embedding gradient on GPU than sparse on CPU\n    \"\"\"\n    return x\n\ndef assign_to_gpu(gpu=0, ps_dev=\"/device:CPU:0\"):\n    def _assign(op):\n        node_def = op if isinstance(op, tf.NodeDef) else op.node_def\n        if node_def.op == \"Variable\":\n            return ps_dev\n        else:\n            return \"/gpu:%d\" % gpu\n    return _assign\n","repo_name":"ShenakhtPajouh/transformer-lm-tf.keras","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"32944163176","text":"class MaxLiklihoodMDP:\n\n    def __init__(self, s, a, t, r, gamma, v, T, R):\n        self.state_space = s\n        self.action_space = a\n        self.transition_count = t\n        self.reward_sum = r\n        self.discount = gamma\n        self.value_funct = v\n        self.transition_funct = T\n        self.reward_funct = R\n\n","repo_name":"thwhite/AA228-CS238-Student","sub_path":"project2/MaxLiklihoodMDP.py","file_name":"MaxLiklihoodMDP.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
{"seq_id":"35959279358","text":"l = ['один', 'два', 'три', 'четыре', 'пять']\nl2 = []\nf1 = open('Lesson8/text_3.txt')\na = f1.readlines()\nprint(a)\ne = len(a)\nj = 0\nfor i in a:\n    b = i\n    c = i.find(' ')\n    d = i[:c]\n    r = b.replace(d, l[j])\n    l2.append(r)  # append inside the loop so every rewritten line is kept\n    j = j + 1\nf2 = open('text_3_new.txt', 
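A usage sketch for iter_data from the Utils.py record above, assuming the module is importable from the path; note that since PEP 479 a generator signals exhaustion with return, which is why raising StopIteration inside it fails on modern Python:

import numpy as np
from Utils import iter_data  # the helper defined in the record above

X = np.arange(10)
Y = np.arange(10) * 2

# With several arrays, each batch is an iterable of aligned slices.
for xb, yb in iter_data(X, Y, n_batch=4, truncate=True):
    print(list(xb), list(yb))
# -> [0, 1, 2, 3] [0, 2, 4, 6]
#    [4, 5, 6, 7] [8, 10, 12, 14]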
'w')\nf2.writelines(l2)","repo_name":"RekhtinaLyudmila/PYTHON_HOMEWORK","sub_path":"TEST.py","file_name":"TEST.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36627381115","text":"import copy\n\nfrom aipy.ai.minimax_ai import MinimaxAI\n\n\ndef get_score(game_state, max_player):\n \"\"\"Gets the score of the given state based on the given player.\"\"\"\n if check_win(game_state, max_player):\n return 10\n elif check_win(game_state, not max_player):\n return -10\n elif no_more_moves(game_state): # tie\n return 0\n else: # not an end state\n return None\n\n\ndef no_more_moves(game_state):\n rows, cols = len(game_state), len(game_state[0])\n for row in range(rows):\n for col in range(cols):\n if game_state[row][col] is None:\n return False\n return True\n\n\ndef check_win(game_state, player):\n \"\"\"Checks if the given player has a winning combo.\"\"\"\n rows, cols = len(game_state), len(game_state[0])\n for row in range(rows):\n for col in range(cols):\n start_row = row\n start_col = col\n if _check_win_from_cell(game_state,\n start_row,\n start_col,\n player):\n return True\n return False\n\n\ndef _check_win_from_cell(game_state, start_row, start_col, player):\n rows, cols = len(game_state), len(game_state[0])\n one_d_dirs = [-1, 0, +1]\n for d_row in one_d_dirs:\n for d_col in one_d_dirs:\n plausible_end_row = start_row + d_row * (rows - 1)\n plausible_end_col = start_col + d_col * (cols - 1)\n # staying in place\n if d_row == 0 and d_col == 0:\n continue\n # index out of range\n elif (plausible_end_row < 0 or plausible_end_row >= rows or\n plausible_end_col < 0 or plausible_end_col >= cols):\n continue\n # valid direction\n else:\n if (_check_win_from_cell_in_dir(game_state,\n start_row,\n start_col,\n d_row,\n d_col,\n player)):\n return True\n return False\n\n\ndef _check_win_from_cell_in_dir(game_state, start_row, start_col, d_row, d_col, player):\n win_length = len(game_state)\n for i in range(win_length):\n row = start_row + i * d_row\n col = start_col + i * d_col\n if game_state[row][col] != player:\n return False\n return True\n\n\ndef _is_valid_move(state, move):\n \"\"\"\n Returns True if a move (represented by a '(row, col)' tuple) is valid.\n \"\"\"\n (row, col) = move\n rows, cols = len(state), len(state[0])\n if (row >= rows or row < 0 or\n col >= cols or col < 0 or\n state[row][col] is not None):\n return False\n else:\n return True\n\n\ndef get_possible_states(state, player):\n \"\"\"Returns an array of all possible game states one move away\"\"\"\n rows, cols = len(state), len(state[0])\n possible_game_states = []\n for row in range(rows):\n for col in range(cols):\n if _is_valid_move(state, (row, col)):\n temp_state = copy.deepcopy(state)\n temp_state[row][col] = player\n possible_game_states.append(temp_state)\n return possible_game_states\n\n\ndef pprint_list(a):\n for elem in a:\n print(elem)\n print('\\n')\n\n\nstate = [[True, False, True],\n\n [None, True, None],\n\n [False, False, None]]\n\n# print(get_score(state, max_player=True))\n\nai = MinimaxAI(heuristic_fn=get_score,\n state_fn=get_possible_states)\n\nchoice = ai.compute(state=state)\n\npprint_list(choice)\n\na = [[True, False, True],\n [None, True, None],\n [False, False, 
True]]\n\n\n","repo_name":"smacpher/aipy","sub_path":"examples/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"41154895826","text":"class Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n \n visited = set()\n rows, cols = len(board), len(board[0]) \n for r in range(rows):\n for c in range(cols):\n if board[r][c] == \".\":\n continue\n s1 = str(board[r][c]) + \" in row \" + str(r)\n s2 = str(board[r][c]) + \" in col \" + str(c)\n s3 = str(board[r][c]) + \" in grid \" + str(c // 3) + \" - \" + str(r // 3)\n # print(s1, s2, s3)\n if s1 in visited or s2 in visited or s3 in visited:\n return False\n else:\n visited.add(s1)\n visited.add(s2)\n visited.add(s3)\n return True","repo_name":"tejeshreddy/competitive-programming","sub_path":"36-valid-sudoku/36-valid-sudoku.py","file_name":"36-valid-sudoku.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73701076424","text":"from random import randint\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator, BranchPythonOperator\nfrom airflow.operators.bash import BashOperator\nimport datetime as dt\n\n\ndef _best_model(ti):\n accuracies = ti.xcom_pull(\n task_ids=['model_A', 'model_B', 'model_C']\n )\n best_accuracy = max(accuracies)\n if best_accuracy > 8:\n return 'accurate'\n else:\n return 'inaccurate'\n\n\ndef _training_model():\n return randint(1, 10)\n\n\nwith DAG('dag1', start_date=dt.datetime(2022, 1, 1), schedule_interval=\"@daily\") as dag:\n model_A = PythonOperator(\n task_id='model_A',\n python_callable=_training_model\n )\n\n model_B = PythonOperator(\n task_id='model_B',\n python_callable=_training_model\n )\n\n model_C = PythonOperator(\n task_id='model_C',\n python_callable=_training_model\n )\n\n best_model = BranchPythonOperator(\n task_id='best_model',\n python_callable=_best_model\n )\n\n accurate = BashOperator(\n task_id='accurate',\n bash_command=\"echo 'accurate'\"\n )\n\n inaccurate = BashOperator(\n task_id='inaccurate',\n bash_command=\"echo 'inaccurate'\"\n )\n\n # >> downstream\n # << upstream\n\n [model_A, model_B, model_C] >> best_model >> [accurate, inaccurate]","repo_name":"Dmarchand97/V360pipelinedemo","sub_path":"AES/DAGpt1/airflow-docker/dags/dag1.py","file_name":"dag1.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21445459680","text":"import json\nimport pathlib\nimport shutil\nfrom dataclasses import dataclass, asdict\nfrom typing import Optional\nimport webbrowser\n\nimport pandas as pd\nfrom PIL import Image, ImageDraw\nfrom docx2txt import docx2txt\nfrom easyocr import easyocr\nfrom tqdm import tqdm\nfrom distutils.dir_util import copy_tree\n\nfrom model import predict_tg, predict_dzen, predict_vk, predict_yt\n\nreader = easyocr.Reader([\"ru\", \"en\"])\n\nPLATFORMS = {\n \"tg\": {\"metrics\": [\"ERR\"], \"function\": predict_tg},\n \"zn\": {\"metrics\": [\"Количество дочитываний\"], \"function\": predict_dzen},\n \"vk\": {\"metrics\": [\"Количество подписчиков\"], \"function\": predict_vk},\n \"yt\": {\"metrics\": [\"Подписчики\", \"Просмотры\"], \"function\": predict_yt},\n}\n\nIDS = {\"tg\": 1, \"vk\": 2, \"yt\": 3, \"zn\": 4,}\n\nALLOW_EXTENSIONS = [\".jpg\", \".png\", \".PNG\", \".jpeg\", \".JPG\", 
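The valid-sudoku record above encodes every constraint as a formatted string dropped into a single visited set; tuples give the same one-pass check without building strings (an equivalent sketch, not the record's code):

def is_valid_sudoku(board):
    seen = set()
    for r in range(9):
        for c in range(9):
            v = board[r][c]
            if v == ".":
                continue
            # One hashable key per constraint: row, column, and 3x3 box.
            keys = [(v, "row", r), (v, "col", c), (v, "box", r // 3, c // 3)]
            if any(k in seen for k in keys):
                return False
            seen.update(keys)
    return True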
\".docx\"]\n\n\n@dataclass\nclass Metrica:\n name: str\n value: Optional[str]\n\n\n@dataclass\nclass Result:\n id: str\n platform: str\n predicted_platform: Optional[str]\n original_file: str\n processed_file: Optional[str]\n metrics: list[Metrica]\n\n\ndef process_excel() -> dict:\n result = {}\n\n for blog_id, image in pd.read_excel(\"data.xlsx\").values:\n result[pathlib.Path(image)] = blog_id\n\n return result\n\n\ndef get_images(dir: str) -> list[tuple[pathlib.Path, pathlib.Path]]:\n result = []\n\n for p in pathlib.Path(f\"{dir}/images\").iterdir():\n if p.is_file() and p.suffix in ALLOW_EXTENSIONS:\n if p.suffix == \".docx\":\n temp_dir = (pathlib.Path(dir) / \"tmp\")\n temp_dir.mkdir(exist_ok=True)\n docx2txt.process(str(p), temp_dir)\n\n image_path = next(temp_dir.iterdir())\n\n if image_path is not None:\n save_path = pathlib.Path(f\"{dir}/images\") / f\"{p.name}{image_path.suffix}\"\n shutil.copyfile(image_path, save_path)\n result.append((p, save_path))\n\n shutil.rmtree(temp_dir)\n else:\n result.append((p, None))\n\n return result\n\n\ndef process_image(platform: str, image: pathlib.Path, blog_id: str) -> Result:\n filename = str(image)\n platform_info = PLATFORMS[platform]\n\n result = platform_info[\"function\"](filename, reader)\n\n if result is None or (\n platform == \"yt\" and result[0][0] is None and result[0][1] is None\n ):\n return Result(\n id=IDS[platform],\n original_file=image.name.lower(),\n processed_file=None,\n metrics=[],\n platform=platform,\n predicted_platform=None\n )\n\n pillow = Image.open(filename)\n draw = ImageDraw.Draw(pillow)\n\n # Для ютуба result такой\n # (['640', 23300.0], [[1140, 442, 1228, 490], [1509, 643, 1585, 663]])\n if platform == \"yt\":\n for box in result[1]:\n draw.rectangle(box, outline=(255, 0, 0), width=2)\n else:\n draw.rectangle(result[1], outline=(255, 0, 0), width=2)\n\n processed_path = pathlib.Path(f\"{platform}/processed_images\")\n processed_path.mkdir(parents=True, exist_ok=True)\n processed_image_filename = str((processed_path / image.name))\n\n pillow.save(processed_image_filename)\n\n if platform == \"yt\":\n metrics = []\n\n for count, value in enumerate(result[0]):\n metrics.append(Metrica(name=platform_info[\"metrics\"][count], value=value))\n else:\n metrics = [Metrica(name=platform_info[\"metrics\"][0], value=result[0])]\n\n return Result(\n id=IDS[platform],\n original_file=image.name.lower(),\n processed_file=image.name.lower(),\n metrics=metrics,\n platform=platform,\n predicted_platform=result[-1]\n )\n\n\ndef generate_excel(platform: str, results: list[Result]):\n platform_info = PLATFORMS[platform]\n frame_data = []\n\n columns = []\n\n for metrica in platform_info[\"metrics\"]:\n columns.append(metrica)\n\n columns.append(\"image\")\n\n for result in results:\n data = []\n if len(result.metrics) > 0:\n for metrica in result.metrics:\n if metrica is None:\n data.append(None)\n else:\n data.append(metrica.value)\n else:\n for i in range(len(platform_info[\"metrics\"])):\n data.append(\"invalid\")\n\n image_filename = pathlib.Path(result.original_file).name\n\n data.append(image_filename)\n frame_data.append(data)\n\n df = pd.DataFrame(frame_data, columns=columns)\n df.to_excel(f\"{platform}/{platform}.xlsx\", index=False)\n\n\ndef main() -> None:\n image_to_blog = process_excel()\n\n images = []\n\n for platform in PLATFORMS:\n for original_file, extracted in get_images(platform):\n images.append((platform, original_file, extracted))\n\n pbar = tqdm(images)\n\n results = {platform: [] for platform in 
PLATFORMS}\n\n for platform, image, extracted in pbar:\n if image not in image_to_blog:\n continue\n\n image_name = (extracted or image).name\n pbar.set_description(\n f\"Обработка платформы: {platform}, изображение: {image_name}\"\n )\n result = process_image(platform, extracted or image, image_to_blog[image])\n results[platform].append(result)\n\n processed_images = {}\n\n for platform, results in results.items():\n generate_excel(platform, results)\n dump = [asdict(item) for item in results]\n json.dump(\n dump,\n pathlib.Path(f\"src/{platform}.json\").open(\"w\"),\n )\n\n processed_images[platform] = []\n\n for result in results:\n if result.processed_file is not None:\n processed_images[platform].append(result.processed_file)\n\n json.dump(processed_images, pathlib.Path(\"src/processed_images.json\").open(\"w\"))\n\n copy_tree(\"tg\", \"src/tg\")\n copy_tree(\"vk\", \"src/vk\")\n copy_tree(\"yt\", \"src/yt\")\n copy_tree(\"zn\", \"src/zn\")\n\n webbrowser.open_new_tab(\"http://localhost:8080\")\n\nmain()\n","repo_name":"YarickVodila/Top-Blog-Parsing-KPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74372796105","text":"from datetime import datetime\nimport os\n\ndef val_log_saver(test_name, model_results, train_test_opt):\n now = datetime.now()\n date_time = now.strftime(\"%d_%m_%Y__%H_%M\")\n\n if not os.path.exists(\"./test_results/\"):\n os.makedirs(\"./test_results/\")\n\n f = open(\"./test_results/\" \n + date_time \n + \"__\" \n + train_test_opt\n + \".txt\", \"w\")\n\n f.write(test_name + \"\\n\")\n f.write(\"------------------------------------------\" + \"\\n\")\n for val in model_results[train_test_opt]:\n # print(val)\n f.write(str(val)+\"\\n\")\n\n f.close()","repo_name":"mertyagmur/breast-cancer-classifier","sub_path":"util/util_logger.py","file_name":"util_logger.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71508810185","text":"import os\nimport logging\nimport copy\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom crowd_sim.envs.utils.info import *\n\nclass Explorer(object):\n def __init__(self, env, robot, device, writer, memory=None, gamma=None, target_policy=None):\n self.env = env\n self.robot = robot\n self.device = device\n self.writer = writer\n self.memory = memory\n self.gamma = 0.95\n self.target_policy = target_policy\n self.statistics = None\n self.use_noisy_net = False\n\n # @profile\n def run_k_episodes(self, k, phase, update_memory=False, imitation_learning=False, episode=None, epoch=None,\n print_failure=False):\n self.robot.policy.set_phase(phase)\n success_times = []\n collision_times = []\n timeout_times = []\n success = 0\n collision = 0\n timeout = 0\n discomfort = 0\n min_dist = []\n cumulative_rewards = []\n average_returns = []\n returns_list = []\n collision_cases = []\n timeout_cases = []\n discomfort_nums = []\n if phase in ['test', 'val'] or imitation_learning:\n pbar = tqdm(total=k)\n else:\n pbar = None\n if self.robot.policy.name in ['model_predictive_rl', 'tree_search_rl']:\n if phase in ['test', 'val'] and self.use_noisy_net:\n self.robot.policy.model[2].eval()\n else:\n self.robot.policy.model[2].train()\n\n for i in range(k):\n ob = self.env.reset(phase)\n done = False\n states = []\n actions = []\n rewards = []\n dones = []\n num_discoms =[]\n while not done:\n 
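The main.py record above drives easyocr through per-platform predict_* functions whose internals are not shown in this slice; the underlying read-and-annotate pattern looks roughly like this (readtext's (box, text, confidence) return shape is easyocr's documented API, the file names are illustrative):

import easyocr
from PIL import Image, ImageDraw

reader = easyocr.Reader(["ru", "en"])  # loads detection + recognition models once

def annotate(filename, out):
    image = Image.open(filename)
    draw = ImageDraw.Draw(image)
    # Each result is (bounding_box, text, confidence); the box holds the
    # four corner points clockwise from the top-left.
    for box, text, confidence in reader.readtext(filename):
        (x0, y0), _, (x1, y1), _ = box
        draw.rectangle((x0, y0, x1, y1), outline=(255, 0, 0), width=2)
    image.save(out)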
num_discom = 0\n action, action_index = self.robot.act(ob)\n ob, reward, done, info = self.env.step(action)\n states.append(self.robot.policy.last_state)\n # for TD3rl, append the velocity and theta\n actions.append(action_index)\n # if phase in ['train', 'test']:\n # self.env.render(mode='debug')\n # actually, final states of timeout cases is not terminal states\n if isinstance(info, Timeout):\n dones.append(False)\n else:\n dones.append(done)\n rewards.append(reward)\n if isinstance(info, Discomfort):\n discomfort += 1\n min_dist.append(info.min_dist)\n num_discom = info.num\n num_discoms.append(num_discom)\n # add the terminal state\n states.append(self.robot.get_state(ob))\n if isinstance(info, ReachGoal):\n success += 1\n success_times.append(self.env.global_time)\n elif isinstance(info, Collision):\n collision += 1\n collision_cases.append(i)\n collision_times.append(self.env.global_time)\n if phase in ['test']:\n print('collision happen %f', self.env.global_time)\n elif isinstance(info, Timeout):\n timeout += 1\n timeout_cases.append(i)\n if phase in ['test']:\n print('timeout happen %f', self.env.global_time)\n rewards[-1] = rewards[-1]\n timeout_times.append(self.env.time_limit)\n else:\n raise ValueError('Invalid end signal from environment')\n\n if update_memory:\n self.update_memory(states, actions, rewards, dones, imitation_learning)\n discomfort_nums.append(sum(num_discoms))\n # cumulative_rewards.append(sum([pow(self.gamma, t * self.robot.time_step * self.robot.v_pref)\n # * reward for t, reward in enumerate(rewards)]))\n cumulative_rewards.append(sum(rewards))\n returns = []\n for step in range(len(rewards)):\n step_return = sum([pow(self.gamma, t * self.robot.time_step * self.robot.v_pref)\n * reward for t, reward in enumerate(rewards[step:])])\n returns.append(step_return)\n returns_list = returns_list + returns\n average_returns.append(average(returns))\n\n if pbar:\n pbar.update(1)\n\n success_rate = success / k\n collision_rate = collision / k\n assert success + collision + timeout == k\n avg_nav_time = sum(success_times) / len(success_times) if success_times else self.env.time_limit\n avg_col_time = sum(collision_times) / len(collision_times) if collision_times else self.env.time_limit\n extra_info = '' if episode is None else 'in episode {} '.format(episode)\n extra_info = extra_info + '' if epoch is None else extra_info + ' in epoch {} '.format(epoch)\n logging.info('{:<5} {}has success rate: {:.3f}, collision rate: {:.3f}, nav time: {:.3f}, col time: {:.3f}, total reward: {:.4f},'\n ' average return: {:.4f}'. 
format(phase.upper(), extra_info, success_rate, collision_rate,\n avg_nav_time, avg_col_time, sum(cumulative_rewards),\n average(average_returns)))\n # if phase in ['val', 'test'] or imitation_learning:\n total_time = sum(success_times + collision_times + timeout_times) / self.robot.time_step\n logging.info('Frequency of being in danger: %.3f and average min separate distance in danger: %.2f',\n discomfort / total_time, average(min_dist))\n logging.info('discomfor nums is %.0f and return is %.04f and length is %.0f', sum(discomfort_nums),\n average(returns_list), len(returns_list))\n if print_failure:\n logging.info('Collision cases: ' + ' '.join([str(x) for x in collision_cases]))\n logging.info('Timeout cases: ' + ' '.join([str(x) for x in timeout_cases]))\n\n self.statistics = success_rate, collision_rate, avg_nav_time, avg_col_time, sum(cumulative_rewards), average(average_returns), discomfort, total_time\n\n return self.statistics\n\n def update_memory(self, states, actions, rewards, dones, imitation_learning=False):\n if self.memory is None or self.gamma is None:\n raise ValueError('Memory or gamma value is not set!')\n \n for i, state in enumerate(states[:-1]):\n reward = rewards[i]\n\n # VALUE UPDATE\n if imitation_learning:\n # define the value of states in IL as cumulative discounted rewards, which is the same in RL\n state = self.target_policy.transform(state)\n action = actions[i]\n done = torch.Tensor([dones[i]]).to(self.device)\n next_state = self.target_policy.transform(states[i+1])\n value = sum([pow(self.gamma, (t - i) * self.robot.time_step * self.robot.v_pref) * reward *\n (1 if t >= i else 0) for t, reward in enumerate(rewards)])\n else:\n next_state = states[i+1]\n action = actions[i]\n if i == len(states) - 1:\n # terminal state\n value = reward\n else:\n value = 0\n value = torch.Tensor([value]).to(self.device)\n reward = torch.Tensor([rewards[i]]).to(self.device)\n done = torch.Tensor([dones[i]]).to(self.device)\n\n if self.target_policy.name == 'ModelPredictiveRL' or self.target_policy.name == 'TreeSearchRL':\n self.memory.push((state[0], state[1], action, value, done, reward, next_state[0], next_state[1]))\n elif self.target_policy.name == 'TD3RL':\n state = rotate_state2(state)\n next_state = rotate_state2(next_state)\n self.memory.push((state, action, value, done, reward, next_state))\n elif self.target_policy.name == 'RGCNRL':\n self.memory.push((state, action, value, done, reward, next_state))\n else:\n self.memory.push((state, value, done, reward, next_state))\n\n def log(self, tag_prefix, global_step):\n sr, cr, time, reward, avg_return, _, _, _ = self.statistics\n self.writer.add_scalar(tag_prefix + '/success_rate', sr, global_step)\n self.writer.add_scalar(tag_prefix + '/collision_rate', cr, global_step)\n self.writer.add_scalar(tag_prefix + '/time', time, global_step)\n self.writer.add_scalar(tag_prefix + '/reward', reward, global_step)\n self.writer.add_scalar(tag_prefix + '/avg_return', avg_return, global_step)\n\n\ndef average(input_list):\n if input_list:\n return sum(input_list) / len(input_list)\n else:\n return 0\n\n\ndef rotate_state(state):\n \"\"\"\n Transform the coordinate to agent-centric.\n Input tuple include robot state tensor and human state tensor.\n robot state tensor is of size (batch_size, number, state_length)(for example 100*1*9)\n human state tensor is of size (batch_size, number, state_length)(for example 100*5*5)\n \"\"\"\n # for robot\n # 'px', 'py', 'vx', 'vy', 'radius', 'gx', 'gy', 'v_pref', 'theta'\n # 0 1 2 3 4 5 6 7 8\n # 
for human\n # 'px', 'py', 'vx', 'vy', 'radius'\n # 0 1 2 3 4\n # for obstacle\n # 'px', 'py', 'radius'\n # 0 1 2\n # for wall\n # 'sx', 'sy', 'ex', 'ey'\n # 0 1 2 3\n assert len(state[0].shape) == 2\n if state[1] is None:\n robot_state = state[0]\n robot_feature_dim = state[0].shape[1]\n human_feature_dim = 5\n dx = robot_state[:, 5] - robot_state[:, 0]\n dy = robot_state[:, 6] - robot_state[:, 1]\n dx = dx.unsqueeze(1)\n dy = dy.unsqueeze(1)\n radius_r = robot_state[:, 4].unsqueeze(1)\n dg = torch.norm(torch.cat([dx, dy], dim=1), 2, dim=1, keepdim=True)\n rot = torch.atan2(dy, dx)\n cos_rot = torch.cos(rot)\n sin_rot = torch.sin(rot)\n vx = (robot_state[:, 2].unsqueeze(1) * cos_rot +\n robot_state[:, 3].unsqueeze(1) * sin_rot).reshape((1, -1))\n vy = (robot_state[:, 3].unsqueeze(1) * cos_rot -\n robot_state[:, 2].unsqueeze(1) * sin_rot).reshape((1, -1))\n v_pref = robot_state[:, 7].unsqueeze(1)\n theta = robot_state[:, 8].unsqueeze(1)\n px_r = torch.zeros_like(v_pref)\n py_r = torch.zeros_like(v_pref)\n new_robot_state = torch.cat((px_r, py_r, vx, vy, radius_r, dg, rot, v_pref, theta), dim=1)\n new_state = (new_robot_state, None)\n return new_state\n else:\n robot_state = state[0]\n human_state = state[1]\n obstacle_state = state[2]\n wall_state = state[3]\n human_num = human_state.shape[0]\n robot_num = robot_state.shape[0]\n obstacle_num = obstacle_state.shape[0]\n wall_num = wall_state.shape[1]\n dx = robot_state[:, 5] - robot_state[:, 0]\n dy = robot_state[:, 6] - robot_state[:, 1]\n dx = dx.unsqueeze(1)\n dy = dy.unsqueeze(1)\n dg = torch.norm(torch.cat([dx, dy], dim=1), 2, dim=1, keepdim=True)\n rot = torch.atan2(dy, dx)\n cos_rot = torch.cos(rot)\n sin_rot = torch.sin(rot)\n transform_matrix = torch.cat((cos_rot, -sin_rot, sin_rot, cos_rot), dim=0).reshape(2, 2)\n a = robot_state[:, 2:4]\n robot_velocities = torch.mm(robot_state[:, 2:4], transform_matrix)\n radius_r = robot_state[:, 4].unsqueeze(1)\n v_pref = robot_state[:, 7].unsqueeze(1)\n target_heading = torch.zeros_like(radius_r)\n pos_r = torch.zeros_like(robot_velocities)\n cur_heading = (robot_state[:, 8].unsqueeze(1) - rot + np.pi) % (2 * np.pi) - np.pi\n new_robot_state = torch.cat((robot_velocities, dg, v_pref, cur_heading),\n dim=1)\n\n human_positions = human_state[:, 0:2] - robot_state[:, 0:2]\n human_positions = torch.mm(human_positions, transform_matrix)\n human_velocities = human_state[:, 2:4]\n human_velocities = torch.mm(human_velocities, transform_matrix)\n human_radius = human_state[:, 4].unsqueeze(1) + 0.3\n new_human_state = torch.cat((human_positions, human_velocities, human_radius), dim=1)\n\n wall_start_positions = wall_state[:, 0:2] - robot_state[:, 0:2]\n wall_start_positions = torch.mm(wall_start_positions, transform_matrix)\n wall_end_positions = wall_state[:, 2:4] - robot_state[:, 0:2]\n wall_end_positions = torch.mm(wall_end_positions, transform_matrix)\n wall_radius = torch.zeros((wall_state.shape[0], 1)) + 0.3\n new_wall_states = torch.cat((wall_start_positions, wall_end_positions, wall_radius), dim=1)\n if len(obstacle_state.shape) == 2:\n obstacle_positions = obstacle_state[:, 0:2] - robot_state[:, 0:2]\n obstacle_positions = torch.mm(obstacle_positions, transform_matrix)\n obstacle_radius = obstacle_state[:, 2].unsqueeze(1) + 0.3\n new_obstacle_states = torch.cat((obstacle_positions, obstacle_radius), dim=1)\n robot_feature_dim = new_robot_state.shape[1]\n human_feature_dim = new_human_state.shape[1]\n obstacle_feature_dim = new_obstacle_states.shape[1]\n wall_feature_dim = 
new_wall_states.shape[1]\n robot_zero_feature = torch.zeros([robot_num, human_feature_dim + obstacle_feature_dim + wall_feature_dim])\n human_zero_feature1 = torch.zeros([human_num, robot_feature_dim])\n human_zero_feature2 = torch.zeros([human_num, obstacle_feature_dim + wall_feature_dim])\n obstacle_zero_feature1 = torch.zeros([obstacle_num, robot_feature_dim + human_feature_dim])\n obstacle_zero_feature2 = torch.zeros([obstacle_num, wall_feature_dim])\n wall_zero_feature = torch.zeros([wall_num, robot_feature_dim + human_feature_dim + obstacle_feature_dim])\n new_robot_state = torch.cat((new_robot_state, robot_zero_feature), dim=1)\n new_human_state = torch.cat((human_zero_feature1, new_human_state, human_zero_feature2), dim=1)\n new_obstacle_states = torch.cat((obstacle_zero_feature1, new_obstacle_states, obstacle_zero_feature2),\n dim=1)\n new_wall_states = torch.cat((wall_zero_feature, new_wall_states), dim=1)\n\n new_state = torch.cat((new_robot_state, new_human_state, new_obstacle_states, new_wall_states), dim=0)\n else:\n robot_feature_dim = new_robot_state.shape[1]\n human_feature_dim = new_human_state.shape[1]\n obstacle_feature_dim = 3\n wall_feature_dim = new_wall_states.shape[1]\n robot_zero_feature = torch.zeros([robot_num, human_feature_dim + obstacle_feature_dim + wall_feature_dim])\n human_zero_feature1 = torch.zeros([human_num, robot_feature_dim])\n human_zero_feature2 = torch.zeros([human_num, obstacle_feature_dim + wall_feature_dim])\n obstacle_zero_feature1 = torch.zeros([obstacle_num, robot_feature_dim + human_feature_dim])\n obstacle_zero_feature2 = torch.zeros([obstacle_num, wall_feature_dim])\n wall_zero_feature = torch.zeros([wall_num, robot_feature_dim + human_feature_dim + obstacle_feature_dim])\n new_robot_state = torch.cat((new_robot_state, robot_zero_feature), dim=1)\n new_human_state = torch.cat((human_zero_feature1, new_human_state, human_zero_feature2), dim=1)\n new_wall_states = torch.cat((wall_zero_feature, new_wall_states), dim=1)\n new_state = torch.cat((new_robot_state, new_human_state, new_wall_states), dim=0)\n return new_state\n\n\ndef rotate_state2(state):\n \"\"\"\n Transform the coordinate to agent-centric.\n Input tuple include robot state tensor and human state tensor.\n robot state tensor is of size (batch_size, number, state_length)(for example 100*1*9)\n human state tensor is of size (batch_size, number, state_length)(for example 100*5*5)\n \"\"\"\n # for robot\n # 'px', 'py', 'vx', 'vy', 'radius', 'gx', 'gy', 'v_pref', 'theta'\n # 0 1 2 3 4 5 6 7 8\n # for human\n # 'px', 'py', 'vx', 'vy', 'radius'\n # 0 1 2 3 4\n # for obstacle\n # 'px', 'py', 'radius'\n # 0 1 2\n # for wall\n # 'sx', 'sy', 'ex', 'ey'\n # 0 1 2 3\n assert len(state[0].shape) == 2\n robot_state = state[0]\n human_state = state[1]\n obstacle_state = state[2]\n wall_state = state[3]\n human_num = human_state.shape[0]\n robot_num = robot_state.shape[0]\n obstacle_num = obstacle_state.shape[0]\n wall_num = wall_state.shape[0]\n\n dx = robot_state[:, 5] - robot_state[:, 0]\n dy = robot_state[:, 6] - robot_state[:, 1]\n dx = dx.unsqueeze(1)\n dy = dy.unsqueeze(1)\n dg = torch.norm(torch.cat([dx, dy], dim=1), 2, dim=1, keepdim=True)\n rot = torch.atan2(dy, dx)\n cos_rot = torch.cos(rot)\n sin_rot = torch.sin(rot)\n transform_matrix = torch.cat((cos_rot, -sin_rot, sin_rot, cos_rot), dim=0).reshape(2, 2)\n a = robot_state[:, 2:4]\n robot_velocities = torch.mm(robot_state[:, 2:4], transform_matrix)\n radius_r = robot_state[:, 4].unsqueeze(1)\n v_pref = robot_state[:, 
7].unsqueeze(1)\n target_heading = torch.zeros_like(radius_r)\n pos_r = torch.zeros_like(robot_velocities)\n cur_heading = (robot_state[:, 8].unsqueeze(1) - rot + np.pi) % (2 * np.pi) - np.pi\n new_robot_state = torch.cat((robot_velocities, dg, v_pref, cur_heading),\n dim=1)\n\n human_positions = human_state[:, 0:2] - robot_state[:, 0:2]\n human_positions = torch.mm(human_positions, transform_matrix)\n human_velocities = human_state[:, 2:4]\n human_velocities = torch.mm(human_velocities, transform_matrix)\n human_radius = human_state[:, 4].unsqueeze(1) + 0.3\n new_human_state = torch.cat((human_positions, human_velocities, human_radius), dim=1)\n\n if len(obstacle_state.shape) == 2:\n obstacle_positions = obstacle_state[:, 0:2] - robot_state[:, 0:2]\n obstacle_positions = torch.mm(obstacle_positions, transform_matrix)\n obstacle_radius = obstacle_state[:, 2].unsqueeze(1) + 0.3\n obstacle_velocity = torch.zeros_like(obstacle_positions)\n obs_human = torch.cat((obstacle_positions, obstacle_velocity, obstacle_radius), dim=1)\n new_human_state = torch.cat((new_human_state, obs_human), dim=0)\n new_obstacle_states = torch.cat((obstacle_positions, obstacle_radius), dim=1)\n robot_feature_dim = new_robot_state.shape[1]\n human_feature_dim = new_human_state.shape[1]\n obstacle_feature_dim = new_obstacle_states.shape[1]\n wall_feature_dim = 5\n robot_zero_feature = torch.zeros(\n [robot_num, human_feature_dim + obstacle_feature_dim + wall_feature_dim])\n human_zero_feature1 = torch.zeros([human_num + obstacle_num, robot_feature_dim])\n human_zero_feature2 = torch.zeros([human_num + obstacle_num, obstacle_feature_dim + wall_feature_dim])\n new_robot_state = torch.cat((new_robot_state, robot_zero_feature), dim=1)\n new_human_state = torch.cat((human_zero_feature1, new_human_state, human_zero_feature2), dim=1)\n new_state = torch.cat((new_robot_state, new_human_state),\n dim=0)\n return new_state","repo_name":"nubot-nudt/HGAT-DRL","sub_path":"crowd_nav/utils/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":19733,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"30123980625","text":"import threading\nimport traceback\nimport time as t\nimport re #regular expression\nfrom pytz import timezone\nimport datetime as dt\nfrom datetime import datetime, timedelta, time\n\n# Third Party\nfrom ib_insync import IB, Stock\n\n# Technical Analysis\nimport pandas\nfrom pandas.core.frame import DataFrame\nimport pandas_ta as ta\n\n\n# Custom\nfrom datastore.earnings_calendar import EarningsCalendar\nfrom datastore.market_data import MarketDataGrabber\nfrom datastore.target_cache import TargetCache\nfrom datastore.trades import TradeCache\n\nfrom notifications.notifications import Notifier\n\n\nHOURS_OF_OPERATION_START = 9 # 9 AM\nHOURS_OF_OPERATION_END = 17 # 5 PM\n\nMINIUMUM_DAILY_CANDLES = 10\nMINIUMUM_5M_CANDLES = 78\n\nclass Octopus:\n\n def __init__(self, settings):\n\n self.settings = settings\n\n def _clean(self):\n\n print(\"Cleaning up\")\n # Don't clean these, it will break the main loop\n # All Datetimes\n #self.lastTick = -1 \n #self.lastInterval = -1 \n #self.currentTick = -1 \n #self.currentInterval = -1\n\n self.frequency = 5 #mins\n \n \n self.targetDict = {}\n # key = symbol\n # value = {\n # 'symbol': 'AMZN'\n # 'contract': ,\n # 'detail': ,\n # 'market_state': \"Open\" #or \"Closed\"\n # 'market_open_today': True/False\n # 'market_open_time': DateTime\n # 'market_close_time': DateTime\n #\n # 'earningsDate': 
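The returns loop in the explorer record above recomputes a discounted sum from scratch at every step, which is O(T^2) in the episode length; a single backward pass yields the same per-step returns under the record's gamma^(t * time_step * v_pref) discounting:

def per_step_returns(rewards, gamma, time_step, v_pref):
    # Backward recursion: R_t = r_t + d * R_{t+1}, with d = gamma^(time_step * v_pref).
    d = gamma ** (time_step * v_pref)
    returns, running = [], 0.0
    for r in reversed(rewards):
        running = r + d * running
        returns.append(running)
    return returns[::-1]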
DateTime\n # 'earningsTiming': DateTime\n #\n # '1d': DataFrame\n # '15m: DataFrame\n # 'gap_pc': float - Gap Up/Down Percentage \n # 'gap_notified': True/False - Has notified via Telegram\n # }\n\n self.industryDict = {}\n # key = industry\n # value = [\"symbol\", \"symbol\"]\n\n self.unknownSymbols = []\n self.pinkSymbols = []\n self.noMarketDataPermissionSymbols = []\n\n self.earningsTargets = {} #dataframe\n\n self.marketOpen = False\n self.twsConnectionLost = False\n self.apiLimitReached = False \n self.noMarketDataPermission = False\n\n\n def run(self, strategy, backTest=False, backTestDateStartStr=\"\", backTestDateEndStr=\"\"):\n self.backTest = backTest\n self.strategy = strategy(self)\n\n self.timezone = timezone('EST')\n self.twsRetryCount = 0\n self.twsClientID = self.settings[\"TWS\"][\"CLIENTID\"]\n\n instanceID = self.settings[\"INSTANCEID\"]\n if self.backTest:\n nowStr = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n \n self.instanceID = f\"{instanceID} {nowStr}\"\n\n self.backTestDateStart = datetime.now().strptime(backTestDateStartStr, \"%Y-%m-%d\").replace(tzinfo=self.timezone)\n self.backTestDateEnd = datetime.now().strptime(backTestDateEndStr, \"%Y-%m-%d\").replace(tzinfo=self.timezone)\n self.backTestError = False\n else:\n self.instanceID = instanceID\n\n\n self.lastTick = -1\n self.lastInterval = -1\n\n self.polling_thread = None\n self.message_queue = []\n\n while True:\n\n self.ib = None\n \n self.earningsCalendar = EarningsCalendar()\n self.marketDataGrabber = MarketDataGrabber(self)\n self.trades = TradeCache(self)\n\n if self.backTest:\n self.targetCache = TargetCache()\n\n if not self.backTest:\n self.lastTick = -1\n self.lastInterval = -1\n\n try:\n self._clean()\n self._connectTWS()\n\n self.notifier = Notifier(self)\n self.ib.sleep(5) # Give Discord/Telegram some time to load\n self.notifier.NotifyLoading()\n \n # Bot Loop\n while (True):\n \n firstLoad = self.lastTick == -1\n firstInterval = self.lastInterval == -1\n\n if self.backTest:\n if firstLoad:\n\n self.notifier.NotifyBackTestingStart(backTestDateStartStr, backTestDateEndStr)\n self.currentTick = datetime.combine(self.backTestDateStart, time(tzinfo=self.timezone)) \n else:\n\n if self.backTestError:\n self.backTestError = False\n else:\n self.currentTick = self.lastTick + timedelta(minutes = self.strategy.BackTestFrequency)\n if self.currentTick >= (self.backTestDateEnd + timedelta(days=1)):\n self.notifier.NotifyBackTestingEnd()\n break\n\n else:\n self.currentTick = datetime.now(self.timezone)\n\n self.currentInterval = self.currentTick - timedelta(\n minutes=self.currentTick.minute % self.frequency,\n seconds=self.currentTick.second,\n microseconds=self.currentTick.microsecond)\n\n \n newDay = firstLoad or (self.lastTick.date() != self.currentTick.date())\n\n if firstLoad and not self.backTest:\n self.notifier.NotifyESTTime()\n\n elif newDay and (not self.backTest):\n self.notifier.NotifyNewDay() \n \n # Load Earnings Targets\n if newDay:\n self._clean() \n if firstLoad or newDay:\n self._identifyEarningsTargets()\n \n if len(self.earningsTargets) > 0:\n # Load Targets\n self._loadTargets(allowCache=self.backTest)\n self.notifier.NotifyEarningsTargetsShort()\n self.notifier.NotifyEarningsTargetsLong()\n\n # Load Market Data\n self._loadMarketData() \n if self.apiLimitReached:\n raise Exception(\"API Limit Reached\")\n if self.twsConnectionLost:\n raise Exception(\"TWS Connection Lost\")\n\n \n if firstInterval or (self.currentInterval > self.lastInterval):\n\n # New Interval!\n 
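# Side note on the interval arithmetic earlier in run(): subtracting
# (minute % frequency) minutes together with the seconds and microseconds
# floors an arbitrary timestamp onto the enclosing 5-minute boundary, e.g.:
#
#   from datetime import datetime, timedelta
#   t = datetime(2021, 8, 11, 9, 47, 23)
#   t - timedelta(minutes=t.minute % 5, seconds=t.second,
#                 microseconds=t.microsecond)   # -> 2021-08-11 09:45:00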
print(f\"Processing Interval: {self.currentInterval.strftime('%a %d %B, %H:%M:%S')}\")\n self.lastInterval = self.currentInterval\n\n # Check Market State\n if not self.backTest:\n market_open = False\n for symbol, target in self.targetDict.items():\n if ('market_open_today' in target) and ('market_close_time' in target):\n market_open = target['market_open_today'] and self.currentInterval >= target['market_open_time'] and self.currentInterval < target['market_close_time']\n if market_open:\n break\n if (self.marketOpen != market_open) or (self.lastTick == -1):\n self.marketOpen = market_open\n self.notifier.NotifyEarningTargetsMarketState()\n else:\n self.marketOpen = market_open\n\n if self.marketOpen:\n\n # Update Market Data\n self._loadMarketData()\n\n else:\n # Backtesting - no way to know when markets open, so use some logic here, and the strategy config\n market_open = HOURS_OF_OPERATION_START <= self.currentInterval.hour <= HOURS_OF_OPERATION_END\n market_open = market_open and self.IsBusinessDay(self.currentInterval.date())\n\n if market_open:\n if len(self.strategy.BackTestValidTimes) > 0:\n market_open = self.currentInterval.strftime(\"%H:%M\") in self.strategy.BackTestValidTimes\n\n if market_open:\n # Perform Trading Logic\n if not self.strategy.SkipTradeLogic:\n self._technicalAnalysis()\n\n self._notifyTargetGapUps()\n\n self._processSellSignals()\n self._processBuySignals()\n\n\n \n # Process Messages\n if not self.backTest:\n while len(self.message_queue) > 0:\n\n msg = self.message_queue.pop(0)\n self._processTBotMessage(msg)\n\n self.lastTick = self.currentTick\n\n if not self.backTest:\n self.ib.sleep(1)\n\n # Bot Loop End, requires a Break to get here\n if self.backTest:\n if (self.currentTick >= self.backTestDateEnd):\n break\n\n \n except (KeyboardInterrupt, SystemExit):\n self._disconnectTWS()\n self.notifier.NotifyStopping()\n self.notifier.Stop()\n \n except:\n print(\"Unexpected exception\")\n self.notifier.NotifyException(traceback.format_exc())\n self.notifier.Stop()\n self._disconnectTWS()\n \n if self.backTest:\n self.backTestError = True\n\n print(f\"Waiting {self.frequency} Minutes\")\n self.ib.sleep(60 * self.frequency)\n \n \n self.twsRetryCount += 1\n if self.twsRetryCount > self.settings[\"TWS\"][\"MAXRETRY\"]:\n break # Exit Python\n self.twsClientID += 1\n \n\n\n def _connectTWS(self):\n print(\"Connecting to TWS API\")\n self.ib = IB()\n self.ib.errorEvent += self.onErrorEvent \n self.ib.connect(host=self.settings[\"TWS\"][\"HOST\"], port=self.settings[\"TWS\"][\"PORT\"], clientId=self.twsClientID, timeout=600)\n\n def _disconnectTWS(self):\n print(\"Disconnecting from TWS API\")\n try:\n self.ib.disconnect()\n except:\n pass\n\n\n # Error Codes -> #https://interactivebrokers.github.io/tws-api/message_codes.html\n def onErrorEvent(self, reqId, error_code, error_string, contract):\n \n # Warnings / Notifications\n if (error_code in [2104, 2106, 2158, 2103, 2105, 1102]):\n print(f\"TWS API Notification: {error_string}\")\n\n # Expected/Handled Errors\n elif (error_code == 200):\n self.stockNotFound = True\n\n elif (error_code == 162):\n if \"HMDS query returned no data\" in error_string:\n pass #This is normal, when there's no data for a particular day yet. If run just after midnight, before market open this occurs. 
Seems to occur for PINK stocks more regularly.\n\n elif \"API historical data query cancelled\" in error_string:\n print(f\"TWS API Notification, Error Code: {error_code}, Error String: {error_string}\")\n self.marketDataQueryFailed = True \n\n else:\n self.noMarketDataPermission = True\n if \"Historical data request pacing violation\" in error_string:\n self.apiLimitReached = True \n \n elif (error_code == 1100):\n self.twsConnectionLost = True\n\n else:\n self.notifier.NotifyTWSError(error_code, error_string)\n raise Exception\n\n\n def _identifyEarningsTargets(self):\n today = self.currentInterval.date()\n if (self.IsBusinessDay(today)):\n lastBusinessDay = today - timedelta(days=1)\n while not self.IsBusinessDay(lastBusinessDay):\n lastBusinessDay = lastBusinessDay - timedelta(days=1)\n\n earningsTargets = DataFrame()\n\n aDate = lastBusinessDay\n count = 0\n msg = \"\"\n while aDate <= today:\n \n dateStr = aDate.strftime(\"%Y-%m-%d\")\n earnings = self.earningsCalendar.GetEarnings(dateStr)\n count += len(earnings)\n\n if len(earnings) > 0:\n if aDate == lastBusinessDay:\n earnings = earnings[earnings.startdatetimetype.isin(['AMC'])] \n\n if len(earnings) > 0:\n earningsTargets = earningsTargets.append(earnings)\n msg += f\"\\n └ {dateStr} - {len(earnings)} after market\"\n\n elif aDate < today:\n earningsTargets = earningsTargets.append(earnings)\n msg += f\"\\n └ {dateStr} - {len(earnings)} while market was closed\"\n\n else:\n earnings = earnings[earnings.startdatetimetype.isin(['BMO', 'TNS'])] \n if len(earnings) > 0: \n earningsTargets = earningsTargets.append(earnings)\n msg += f\"\\n └ {dateStr} - {len(earnings)} pre market/unknown time\"\n\n aDate = aDate + timedelta(days=1)\n\n self.notifier.NotifyEarningsTargets(count, msg)\n\n self.earningsTargets = earningsTargets\n\n\n def _loadTargets(self, allowCache=False):\n \n symbols = self.earningsTargets['ticker']\n print(f'Loading {len(symbols)} symbols')\n\n loadFromTWS = True\n if allowCache:\n targetCacheDict = self.targetCache.GetTargetCache(self.currentInterval.date())\n loadFromTWS = len(targetCacheDict) == 0\n\n if not loadFromTWS:\n\n # Found in cache, load from Cache (Only while backtesting)\n \n for symbol in symbols:\n if '-' not in symbol: # Fixes a bug when symbols have a hyphen\n if symbol in targetCacheDict:\n targetCache = targetCacheDict[symbol]\n\n # Load from Target Cache\n detail = targetCache['detail']\n contract = Stock(symbol, \"SMART\", \"USD\", primaryExchange=targetCache['primaryExchange'])\n\n earningsItem = self.earningsTargets[self.earningsTargets.ticker == symbol].iloc[0]\n earningsDate = earningsItem.erDate\n earningsTiming = earningsItem.startdatetimetype\n\n target = self._createTarget(symbol, contract, detail, earningsDate=earningsDate, earningsTiming=earningsTiming)\n\n self.targetDict[symbol] = target\n\n if detail.industry not in self.industryDict.keys():\n self.industryDict[detail.industry] = []\n self.industryDict[detail.industry].append(target['symbol'])\n \n print(f\"Loaded From Cache - {symbol}: {detail.longName} ({contract.primaryExchange}) ({detail.industry} - {detail.category} - {detail.subcategory})\")\n else:\n self.unknownSymbols.append(symbol)\n\n else:\n\n for symbol in symbols:\n if '-' not in symbol: # Fixes a bug when symbols have a hyphen\n \n self.stockNotFound = False\n try:\n contract = Stock(symbol, \"SMART\", \"USD\")\n detail = self.ib.reqContractDetails(contract)[0]\n contract = Stock(symbol, \"SMART\", \"USD\", primaryExchange=detail.contract.primaryExchange)\n 
except:\n if (self.stockNotFound): \n print(f\"Could not load Symbol: {symbol}\")\n self.unknownSymbols.append(symbol)\n else:\n raise\n\n if (not self.stockNotFound):\n\n if detail.contract.primaryExchange == \"PINK\":\n print(f\"Symbol {symbol} is PINK\")\n self.pinkSymbols.append(symbol)\n\n else:\n\n earningsItem = self.earningsTargets[self.earningsTargets.ticker == symbol].iloc[0]\n earningsDate = earningsItem.erDate\n earningsTiming = earningsItem.startdatetimetype\n\n target = self._createTarget(symbol, contract, detail, earningsDate=earningsDate, earningsTiming=earningsTiming)\n \n print(f\"Found {symbol}: {detail.longName} ({contract.primaryExchange}) ({detail.industry} - {detail.category} - {detail.subcategory})\")\n\n # Check Timezone\n if detail.timeZoneId != \"US/Eastern\":\n raise Exception(f\"Unexpected Timezone: {detail.timeZoneId}\")\n\n\n # Parse Hours. Find Today, and get open/close time (if market is open)\n days = detail.liquidHours.split(';')\n # 20210811:0930-20210811:1600\n # 20210812:0930-20210812:1600\n # 20210813:0930-20210813:1600\n # 20210814:CLOSED\n # 20210815:CLOSED\n # 20210816:0930-20210816:1600 \n tickday = self.currentTick.strftime(\"%Y%m%d\")\n \n target['market_open_today'] = False\n\n for day in days:\n slist = day.split('-')\n start = slist[0]\n startparts = start.split(':')\n if startparts[0] == tickday:\n\n if startparts[1] == \"CLOSED\":\n \n # Reporting earnings when the market is closed? Odd situation, treat like an exception for now (Probably needs to be improved, as not taking into acount public holidays)\n self.notifier.NotifyEarningsWhileClosed(symbol, contract.primaryExchange)\n \n else:\n target['market_open_today'] = True\n end = slist[1]\n endparts = end.split(':')\n target['market_open_time'] = self.currentTick.replace(hour=int(startparts[1][0:2]), minute=int(startparts[1][2:]), second=0, microsecond=0)\n target['market_close_time'] = self.currentTick.replace(hour=int(endparts[1][0:2]), minute=int(endparts[1][2:]), second=0, microsecond=0)\n\n print(f\"{symbol} ({contract.primaryExchange}) is open today - {target['market_open_time']} - {target['market_close_time']}\\n\")\n\n break\n \n #print(f\"Min Tick: {detail.minTick}\")\n \n self.targetDict[symbol] = target\n\n if detail.industry not in self.industryDict.keys():\n self.industryDict[detail.industry] = []\n self.industryDict[detail.industry].append(target['symbol'])\n \n if allowCache and len(self.targetDict) > 0:\n self.targetCache.SaveTargetCache(self.currentInterval.date(), self.targetDict)\n\n def _createTarget(self, symbol, contract, detail, earningsDate=None, earningsTiming=\"\"):\n return { \n \"symbol\": symbol,\n \"contract\": contract,\n \"detail\": detail,\n\n \"earningsDate\": earningsDate,\n \"earningsTiming\": earningsTiming,\n\n \"gap_pc\": 0,\n \"gap_notified\": False,\n }\n\n\n def GetMidnight(self, date):\n return datetime.combine(date, datetime.min.time()).replace(tzinfo=self.timezone)\n\n def GetToday(self):\n return datetime.now(self.timezone).date()\n\n def IsBusinessDay(self, aDate):\n return (aDate.weekday() not in (5, 6))\n\n\n def _loadMarketData(self):\n print(\"Loading Market Data\\n\\n\")\n start = t.time()\n for symbol, target in self.targetDict.items():\n self._loadMarketDataTarget(target)\n \n if not self.apiLimitReached and not self.twsConnectionLost:\n\n # Remove Targets which are missing permissions\n removalList = []\n for badSymbol in self.noMarketDataPermissionSymbols:\n if badSymbol in self.targetDict.keys():\n removalList.append(badSymbol)\n 
del self.targetDict[badSymbol]\n if len(removalList) > 0:\n self.notifier.NotifyNoPermissions(removalList)\n\n if self.backTest:\n self.targetCache.RemoveTargetsFromCache(self.currentInterval.date(), removalList)\n\n\n # Remove Targets which have insufficient daily candle data\n removalList = []\n for symbol, target in list(self.targetDict.items()):\n print(f\"Checking {symbol} 1D data validity\")\n df = target['1d']\n if df.empty or (len(df) < MINIUMUM_DAILY_CANDLES):\n removalList.append(symbol)\n del self.targetDict[symbol]\n if len(removalList) > 0:\n self.notifier.NotifyRecentlyListedRemoved(removalList)\n\n if self.backTest:\n self.targetCache.RemoveTargetsFromCache(self.currentInterval.date(), removalList) \n\n\n # Remove Targets which have insufficient 5M candle data\n removalList = []\n for symbol, target in list(self.targetDict.items()):\n print(f\"Checking {symbol} 5M data validity\")\n df = target['5m']\n if df.empty or (len(df) < MINIUMUM_5M_CANDLES):\n removalList.append(symbol)\n del self.targetDict[symbol]\n if len(removalList) > 0:\n self.notifier.NotifyMissing5MData(removalList)\n\n if self.backTest:\n self.targetCache.RemoveTargetsFromCache(self.currentInterval.date(), removalList) \n\n \n end = t.time()\n delta = end - start\n print(\"Took %.2f seconds to process\\n\" % delta)\n\n def _checkMarketDataForDuplicates(self, target, df, desc):\n dupes = df[df.duplicated(['date'])]\n if not dupes.empty:\n raise Exception(f\"Duplicated {desc} OHLCV data found for {target['symbol']}\")\n \n\n\n def _loadMarketDataTarget(self, target):\n symbol = target['symbol']\n print(f\"\\nLoading data for {symbol}\")\n\n if (not self.twsConnectionLost) and (not self.apiLimitReached): \n\n target['1d'] = DataFrame()\n target['5m'] = DataFrame()\n # target['15m'] = DataFrame()\n \n self.noMarketDataPermission = False\n df = self.marketDataGrabber.LoadBatchMarketData(target, \"1 day\", self.currentInterval.date(), 330, limit=30)\n if not df.empty:\n target['1d'] = df.copy()\n self._checkMarketDataForDuplicates(target, target['1d'], \"1 day\")\n\n\n if (not self.noMarketDataPermission):\n df = self.marketDataGrabber.LoadBatchMarketData(target, \"5 mins\", self.currentInterval.date(), 10, limit=2)\n if not df.empty:\n target['5m'] = df.copy()\n self._checkMarketDataForDuplicates(target, target['5m'], \"5 min\") \n\n # if (not self.noMarketDataPermission):\n # df = self.marketDataGrabber.LoadBatchMarketData(target, \"15 mins\", self.currentInterval.date(), 3, limit=5)\n # if not df.empty:\n # target['15m'] = target['15m'].append(df) \n\n if self.noMarketDataPermission:\n self.noMarketDataPermissionSymbols.append(symbol)\n print(f\"No Market Data Permission for {symbol}\\n\")\n\n\n def _technicalAnalysis(self):\n for symbol, target in list(self.targetDict.items()):\n self._technicalAnalysisTarget(target)\n\n def _technicalAnalysisTarget(self, target):\n symbol = target['symbol']\n print(f\"Performing Technical Analysis on {symbol}\")\n\n try:\n task = \"Loading\"\n df_1D = target['1d'].copy()\n df_5M = target['5m'].copy()\n\n # df_15M = target['15m'].copy()\n # df_15M.index = pandas.DatetimeIndex(df_15M['date'])\n\n task = \"Filtering\"\n # Filter out dataframes later than the current interval\n df_1D = df_1D[df_1D['date'] <= self.currentInterval.date()] # Allow Partial Daily Candles\n df_5M = df_5M[df_5M['date'] <= self.currentInterval.replace(tzinfo=None)] \n #df_15M = df_15M[df_15M['date'] <= self.currentInterval.replace(tzinfo=None)]\n\n \n if df_1D.empty:\n raise Exception(f\"Symbol {symbol} 
empty 1d candle data\")\n\n if df_5M.empty:\n raise Exception(f\"Symbol {symbol} empty 5m candle data\")\n\n target['currentPrice'] = df_5M.iloc[-1].close\n\n # Recreate the daily candle for today, based on todays 5m candles\n if self.backTest:\n task = \"Recreating Last Daily Candle\"\n aDate = self.currentInterval.date()\n aDateTime = self.GetMidnight(self.currentInterval).replace(tzinfo=None)\n df = df_5M[df_5M['date'] >= aDateTime]\n \n aggregation = { \n 'open' :'first',\n 'high' :'max',\n 'low' :'min',\n 'close' :'last',\n 'volume':'sum' \n }\n df = df.resample('1D').agg(aggregation)\n if df.empty:\n today_df = df_1D[df_1D['date'] == aDate]\n if not today_df.empty:\n df_1D = df_1D.drop(aDate)\n else:\n df_1D.at[aDateTime, 'open'] = df.at[aDateTime, 'open']\n df_1D.at[aDateTime, 'high'] = df.at[aDateTime, 'high']\n df_1D.at[aDateTime, 'low'] = df.at[aDateTime, 'low']\n df_1D.at[aDateTime, 'close'] = df.at[aDateTime, 'close']\n df_1D.at[aDateTime, 'volume'] = df.at[aDateTime, 'volume']\n\n\n task = \"Technical Analysis\"\n\n # 1 Day\n # SMA_10\n df_1D.ta.sma(length=10, append=True)\n # SMA_20\n df_1D.ta.sma(length=20, append=True)\n # SMA_50\n df_1D.ta.sma(length=50, append=True)\n # SMA_100\n df_1D.ta.sma(length=100, append=True)\n # SMA_150\n df_1D.ta.sma(length=150, append=True)\n target['1d_ta'] = df_1D\n\n # 15 Min\n # EMA_10\n # df_15M.ta.ema(length=10, append=True)\n # target['15m_ta'] = df_15M\n\n \n # 5 Min\n target['5m_ta'] = df_5M\n\n\n\n # Calculate ADR\n # Regarding the ADR formula in TC2000; its using division to get a percent change each day. It adds up these percentage gains for last 20 periods, then divides the sum by 20 periods. Then it subtracts 1 from that number to have a decimal number. Then the decimal number is times by 100 to get it back into a full percentage.\n # 100*((H0/L0+H1/L1+H2/L2+H3/L3+H4/L4+H5/L5+H6/L6+H7/L7+H8/L8+H9/L9+H10/L10+H11/L11+H12/L12+H13/L13+H14/L14+H15/L15+H16/L16+H17/L17+H18/L18+H19/L19)/20-1)\n task = \"Calculating ADR\"\n pandas.set_option('mode.chained_assignment', None)\n # Get the last 20 days from a Pandas DataFrame containing daily OHLCV candle data (open, high, low, close, volume).\n df = df_1D.tail(20)\n # Get the range for each day, as a fractional value\n df['range_f'] = df['high'] / df['low']\n # Convert to Decimal\n df['range_px'] = 100 * (df['range_f'] - 1)\n adr = df['range_px'].mean() #Average\n \n target['adr'] = adr\n pandas.set_option('mode.chained_assignment', 'warn')\n\n\n\n\n # Get first candle of the current day\n task = \"Loading 1D Open Candle\"\n df = df_1D[df_1D.date == self.currentInterval.date()]\n if df.empty:\n print(f\"Skipping {symbol}, no opening candle data\")\n else:\n today_open = df.iloc[0]\n\n # Get last candle of the most recent day\n task = \"Loading last 1D Close Candle\"\n df_yesterday = df_1D[df_1D.date < self.currentInterval.date()]\n yesterday_close = df_yesterday.iloc[-1]\n\n\n target['yesterday_close'] = yesterday_close.close\n target['today_open'] = today_open.open\n\n gap = today_open.open - yesterday_close.close \n target['gap_pc'] = (gap / yesterday_close.close) * 100.0\n\n # if gap > 3:\n # print(\"test\")\n\n except (KeyboardInterrupt):\n raise\n\n except:\n if target in self.targetDict:\n del self.targetDict[symbol]\n\n self.notifier.NotifyFailedTechnicalAnalysis(symbol, task)\n\n\n print(traceback.format_exc())\n\n self.notifier.NotifyException(traceback.format_exc())\n \n \n def _processBuySignals(self):\n for symbol, target in list(self.targetDict.items()):\n 
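# (editor aside, hedged sketch) The ADR block in _technicalAnalysisTarget above
# computes the TC2000-style Average Daily Range over the last 20 daily candles:
#   ADR% = 100 * (mean(high_i / low_i over the last 20 days) - 1)
# Since the mean of an affine transform equals the affine transform of the mean,
# the same value collapses to one line (assuming the 'high'/'low' columns used above):
#   adr = 100.0 * ((df_1D['high'] / df_1D['low']).tail(20).mean() - 1.0)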
self._processBuySignalsTarget(target)\n\n def _processBuySignalsTarget(self, target):\n symbol = target['symbol']\n\n if symbol in self.trades.GetOpenTrades():\n print(f\"Found open trade for {symbol}, removed from target dict\")\n del self.targetDict[symbol]\n else:\n if self.strategy.buy_signal(target):\n\n trade = self.trades.OpenTrade(target)\n del self.targetDict[symbol]\n\n self.notifier.NotifyTradeOpen(symbol, trade)\n \n\n def _processSellSignals(self):\n openTrades = self.trades.GetOpenTrades()\n for symbol, trade in openTrades.items():\n self._processSellSignalsTrade(trade)\n\n def _processSellSignalsTrade(self, trade):\n symbol = trade['symbol']\n\n self._loadMarketDataTarget(trade)\n self._technicalAnalysisTarget(trade)\n if self.strategy.sell_signal(trade):\n\n closedTrade = self.trades.CloseTrade(trade)\n self.notifier.NotifyTradeClose(symbol, closedTrade)\n\n # todo - Respond to bot chart request\n def todoProcessChartRequest(self, msg):\n\n cleanedStr = re.sub(r'[^a-zA-Z0-9 ]', '', msg.strip()).split(' ')\n symbol = \"\"\n try:\n self.stockNotFound = False\n \n symbol = cleanedStr[0].upper()\n \n try:\n contract = Stock(symbol, \"SMART\", \"USD\")\n detail = self.ib.reqContractDetails(contract)[0]\n contract = Stock(symbol, \"SMART\", \"USD\", primaryExchange=detail.contract.primaryExchange)\n\n target = self._createTarget(symbol, contract, detail)\n\n\n # self._loadMarketDataTarget(target)\n\n target['1d'] = DataFrame()\n target['5m'] = DataFrame()\n\n self.noMarketDataPermission = False\n df = self.marketDataGrabber.LoadBatchMarketData(target, \"1 day\", self.currentInterval.date(), 360, limit=30)\n if not df.empty:\n target['1d'] = df.copy()\n self._checkMarketDataForDuplicates(target, target['1d'], \"1 day\")\n\n\n df = self.marketDataGrabber.LoadBatchMarketData(target, \"5 mins\", self.currentInterval.date(), 10, limit=2)\n if not df.empty:\n target['5m'] = df.copy()\n\n self._technicalAnalysisTarget(target)\n # self._notifyPlot(symbol, target['1d_ta'], '1 day', 365)\n self.GenerateChart(symbol, target['1d_ta'], '1 day', 150)\n # self._notifyPlot(symbol, target['5m_ta'], '5 mins', 6)\n\n if self.noMarketDataPermission:\n self.noMarketDataPermissionSymbols.append(symbol)\n print(f\"No Market Data Permission for {symbol}\\n\")\n\n except:\n if (self.stockNotFound): \n print(f\"Could not load Symbol: {symbol}\")\n self.unknownSymbols.append(symbol)\n else:\n self._notify(f\"\\U00002755 Unexpected error\")\n\n except:\n self._notify(f\"\\U00002754 Unable to locate symbol {symbol}\")\n\n\n def _notifyTargetGapUps(self):\n for symbol, target in self.targetDict.items():\n if not target['gap_notified']:\n if target['gap_pc'] >= 3:\n self.notifier.NotifyTargetGapUp(target)\n target['gap_notified'] = True\n\n \n\n\n # def test(self):\n # self.backTest = True\n # self.timezone = timezone('EST')\n \n # # target = { \n # # 'symbol': 'TSLA',\n # # 'contract': Stock('TSLA', \"SMART\", \"USD\", primaryExchange=\"NASDAQ\") \n # # }\n\n # self._connectTWS()\n # self.marketDataGrabber = MarketDataGrabber(self)\n # # self.marketDataGrabber.DeleteFromCache(\"TSLA\")\n # # df = self.marketDataGrabber.LoadBatchMarketData(target, \"15 mins\", datetime.now(self.timezone).date(), 3)\n # # self._disconnectTWS()\n\n # self._clean()\n \n # testDateStr = \"20210802 09:45:00\" \n # testSymbol = \"GLT\"\n\n # self.currentInterval = datetime.strptime(testDateStr, '%Y%m%d %H:%M:%S').replace(tzinfo=self.timezone)\n\n # target = { \n # 'symbol': testSymbol,\n # 'contract': Stock(testSymbol, \"SMART\", 
\"USD\", primaryExchange=\"NASDAQ\") \n # }\n # self.targetDict[target['symbol']] = target\n\n # # df = self.marketDataGrabber.LoadBatchMarketData(target, \"1 day\", aDate, 300, limit=100)\n # self._loadMarketDataTarget(target)\n # self._technicalAnalysisTarget(target)\n # # self._notifyPlot(target['symbol'], df, \"1 day\", 60)","repo_name":"webclinic017/Octopus","sub_path":"octopus.py","file_name":"octopus.py","file_ext":"py","file_size_in_byte":35529,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"8303784787","text":"import django\n\n\ndef get_cached_value(instance, descriptor, default=None):\n if django.VERSION < (2, 0):\n return getattr(instance, descriptor.cache_name, default)\n else:\n return descriptor.field.get_cached_value(instance, default=default)\n\n\ndef set_cached_value_by_descriptor(instance, descriptor, value):\n if django.VERSION < (2, 0):\n setattr(instance, descriptor.cache_name, value)\n else:\n descriptor.field.set_cached_value(instance, value)\n\n\ndef set_cached_value_by_field(instance, field, value):\n if django.VERSION < (2, 0):\n setattr(instance, field.get_cache_name(), value)\n else:\n field.set_cached_value(instance, value)\n","repo_name":"onysos/django-composite-foreignkey","sub_path":"compositefk/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"81"} +{"seq_id":"7594375295","text":"import numpy as np \nimport random as rn\nimport os \nimport sys\nimport graspable_checker_tree as gct\nimport graspable_checker_all as gca\nimport candidate_make as cmake\nclass workspace:\n def __init__(self,rows=0,cols=0):\n #self.grid_act = np.array(False)\n #self.grid_act.resize(rows,cols)\n #self.neighbor_name = [\"lower\",\"higher\",\"before\",\"after\"]\n self.goal = -1\n self.obstacles = []\n\n def clear(self):\n for i in range(len(self.grid_act)):\n for j in range(len(self.grid_act[0])):\n self.grid_act[i][j] = False\n\n def index2xy(self,ind):\n return ind//len(self.grid_act[0]), ind%len(self.grid_act[0])\n\n def xy2index(self,i,j):\n return i*len(self.grid_act[0]) + j\n \n def make_str_objects_param(self):\n #cell_number = len(self.grid_act) \n objects_number = gca.graspable_checker()\n out_str=\"(:objects \"\n for n in range(objects_number):\n out_str+=\"obstacle\" + str(n) + \" \"\n out_str += \"target\"\n out_str += \" - objects\"\n return out_str\n\n def make_str_slots_param(self):\n slots_number = cmake.graspable_checker() \n out_str=\" \"\n for n in range(len(slots_number)):\n out_str+=\"slot\" + str(n) + \" \"\n out_str += \"- empty_slots)\"\n return out_str\n\n # def make_str_param_amount(self):\n # cell_number = len(self.grid_act) \n # out_str=\" \"\n # for cell in range(cell_number):\n # out_str+=\"(=(amount obstacle\" + str(cell) + \" \" + \"slot0\" + \")\" + \" \" + \"0)\" +\"\\n\"\n # out_str+=\"(=(amount obstacle\" + str(cell) + \" \" + \"slot1\" + \")\" + \" \" + \"0)\" + \"\\n\"\n # out_str+=\"(=(amount obstacle\" + str(cell) + \" \" + \"slot2\" + \")\" + \" \" + \"0)\" + \"\\n\"\n # out_str+=\"(=(amount obstacle\" + str(cell) + \" \" + \"slot3\" + \")\" + \" \" + \"0)\" + \"\\n\"\n # out_str+=\"(=(amount obstacle\" + str(cell) + \" \" + \"slot4\" + \")\" + \" \" + \"0)\" + \"\\n\"\n # out_str+=\"(=(amount obstacle\" + str(cell) + \" \" + \"slot5\" + \")\" + \" \" + \"0)\" + \"\\n\"\n # out_str+=\"(=(amount target\" + \" \" + \"slot0\" + \")\" + \" \" + \"0)\" +\"\\n\"\n # 
out_str+=\"(=(amount target\" + \" \" + \"slot1\" + \")\" + \" \" + \"0)\" +\"\\n\"\n # out_str+=\"(=(amount target\" + \" \" + \"slot2\" + \")\" + \" \" + \"0)\" +\"\\n\"\n # out_str+=\"(=(amount target\" + \" \" + \"slot3\" + \")\" + \" \" + \"0)\" +\"\\n\"\n # out_str+=\"(=(amount target\" + \" \" + \"slot4\" + \")\" + \" \" + \"0)\" +\"\\n\"\n # out_str+=\"(=(amount target\" + \" \" + \"slot5\" + \")\" + \" \" + \"0)\" +\"\\n\"\n # return out_str\n\n #def make_str_param_numberOf(self):\n # cell_number = len(self.grid_act) \n # out_str=\" \"\n # for cell in range(cell_number):\n # out_str+=\"(=(numberOf obstacle0\" + \" \" + \"obstacle\" + str(cell) + \")\" +\" \"+ \"0)\" + \"\\n\"\n # out_str+=\"(=(numberOf obstacle1\" + \" \" + \"obstacle\" + str(cell) + \")\" +\" \"+ \"0)\" + \"\\n\"\n # out_str+=\"(=(numberOf obstacle2\" + \" \" + \"obstacle\" + str(cell) + \")\" + \" \" + \"0)\" + \"\\n\"\n # out_str+=\"(=(numberOf obstacle3\" + \" \" + \"obstacle\" + str(cell) + \")\" + \" \" + \"0)\" + \"\\n\" \n # return out_str\n\n def make_str_param_on_table(self):\n objects_number = gca.graspable_checker() \n out_str=\" \"\n for cell in range(objects_number):\n out_str+=\"(on-table obstacle\" + str(cell) + \")\"\n out_str+=\"(on-table target\" + \")\"\n return out_str\n\n def make_str_param_canGrasp(self):\n objects_number = gca.graspable_checker() \n out_str=\"\"\n for cell in range(objects_number):\n out_str+=\"(canGrasp obstacle\" + str(cell) + \")\" \n return out_str\n\n def make_str_param_clear(self):\n objects_number = gca.graspable_checker() \n out_str=\"\"\n for cell in range(objects_number):\n out_str+=\"(clear obstacle\" + str(cell) + \")\"\n out_str+=\"(clear target\" + \")\" \n return out_str\n\n def make_str_param_obstruct(self):\n sequence_of_obstacles = gct.graspable_checker()\n out_str = \"\"\n sequence_of_obstacles = sequence_of_obstacles[:-1]\n for idx, element in enumerate(sequence_of_obstacles):\n out_str+=\"(obstruct\" + str(idx) + \" \" + \"target\" + \" \" + \"obstacle\" + str(element) + \")\" \n\n \n if gct.graspable_checker is None:\n out_str=\"(obstruct target\" + \" \" + \"target\" + \")\" \n return out_str\n\n def make_str_param_clean_slots(self):\n slots_number = cmake.graspable_checker()\n out_str=\"\"\n for cell in range(len(slots_number)):\n out_str += \"(clean\" + \" \" + \"slot\" + str(cell) + \")\" \n return out_str\n\n\n def make_str_param_clean(self):\n out_str = \" \"\n cell_number = len(self.grid_act)\n for cell in range(cell_number):\n rand_cell = rn.randint(0,cell)\n out_str += \"(\" + \" \" + \"obstacle\" + str(rand_cell) + \")\" \n return out_str\n \n\n def mark_occupancy(self,ind,b):\n i,j = self.index2xy(ind)\n self.grid_act[i][j] = b\n\n def is_occupied(self,ind):\n i,j = self.index2xy(ind)\n return self.grid_act[i][j]\n\n def set_obstacles(self, n_of_obs):\n cell_number = len(self.grid_act) \n obstacles_ind = []\n while(len(obstacles_ind) != n_of_obs):\n obs_candidate = rn.randint(0,cell_number-1)\n if(self.is_occupied(obs_candidate) != True):\n #add to obstables list\n obstacles_ind.append(obs_candidate)\n #mark as occupied\n self.mark_occupancy(obs_candidate,True)\n\n self.obstacles = sorted(obstacles_ind)\n \n def make_str_complete(self):\n out_str = \"(define (problem test_p)\\n (:domain test)\\n\"\n out_str += \" \" + self.make_str_objects_param() + \"\\n\"\n out_str += \" \" + \"\\n\" \n out_str += \" \" + self.make_str_slots_param() + \"\\n\"\n out_str += \" \" + \"\\n\" \n out_str += \" (:init\\n\"\n #out_str += 
self.make_str_param_amount() + \"\\n\"\n #out_str += \" \" + \"\\n\" \n #out_str += self.make_str_param_numberOf() + \"\\n\" \n #out_str += \" \" + \"\\n\" \n #out_str += \" \" + \"(=(maxItems) 2)\" + \"\\n\" \n out_str += \" \" + self.make_str_param_on_table() + \"\\n\"\n out_str += \" \" + \"\\n\" \n out_str += self.make_str_param_obstruct() + \"\\n\" \n out_str += \" \" + \"\\n\" \n out_str += self.make_str_param_canGrasp() + \"\\n\" \n out_str += \" \" + \"\\n\" \n out_str += self.make_str_param_clear() + \"\\n\"\n out_str += \" \" + \"\\n\" \n out_str += self.make_str_param_clean_slots() + \"\\n\" \n out_str += \" \" + \"\\n\" \n #out_str += self.make_str_param_on() + \"\\n\" \n out_str += \" \" + \"\\n\" \n out_str += \"(emptyhand)\" + \"\\n\" \n out_str += \" \" + \")\\n\"\n out_str += \" \" + \"(:goal (and (holding target\" + \") ))\\n\"\n out_str += \")\"\n return out_str\n\ndef modify_pddl_random(map,pddl_file):\n #map.clear()\n # map.set_obstacles(n_obj)\n # print(map.make_str_objects_param()) \n # print(map.make_str_slots_param())\n pddl_content = map.make_str_complete()\n #print(pddl_content)\n\n save_path = '/home/umka/catkin_ws/src/try/common/PDDL/'\n pddl_file2 = os.path.join(save_path, pddl_file)\n\n f = open(pddl_file2, \"w\")\n f.write(pddl_content)\n f.close()\n\n return map\n \ndef run_terminal(cmd):\n return os.system(cmd)\n\nclass parameter_set:\n def __init__(self,file=\"\"):\n self.repeat_n = -1\n #self.objects_number = -1\n #self.slots_number = -1\n self.pddl_file = \"\"\n self.run_cmd = \"\"\n if(file!=\"\"):\n with open(file) as f:\n lines = f.read().splitlines()\n for line in lines:\n temp = line.split(\":\")\n if(temp[0] == \"repeat_n\"):\n self.repeat_n = int(temp[1])\n elif(temp[0] == \"pddl_file\"):\n self.pddl_file = temp[1]\n #elif(temp[0] == \"objects_number\"):\n #self.objects_number = temp[1]\n #elif(temp[0] == \"slots_number\"):\n #self.slots_number = temp[1]\n elif(temp[0] == \"run_cmd\"):\n self.run_cmd = temp[1]\n else:\n print(\"unknown parameter name\")\n\n\nparam_file_name = \"parameters.csv\"\nprint(\"Read Parameters from File: \" + param_file_name)\nparam_set = parameter_set(param_file_name)\n\ngrid_N = gca.graspable_checker()\ngrid_S = cmake.graspable_checker()\nprint(\"Generating workspace with \"+ str(grid_N) + str(grid_S))\nmap = workspace(grid_N,grid_S)\n\nlog_list = []\nnumber_of_objects = gca.graspable_checker()\nn_repeat = param_set.repeat_n\nslots_number = cmake.graspable_checker()\n#the loop\n#for obj in range(number_of_objects):\n #for slot in range(slots_number):\nfor repeat in range(n_repeat):\n modified_map = modify_pddl_random(map,param_set.pddl_file)\n \nprint(\"Finished\")\nsys.exit()\n","repo_name":"rakhmanu/TaskPlanning","sub_path":"problem_generator.py","file_name":"problem_generator.py","file_ext":"py","file_size_in_byte":8973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36201543445","text":"# -*- coding: utf-8 -*-\n\"\"\"Hierarchical Graph Attention Network, by Mi et al., 2020.\"\"\"\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .base_sg_generator import BaseSGGenerator\n\n\nclass HGATNet(BaseSGGenerator):\n \"\"\"Extends PyTorch nn.Module.\"\"\"\n\n def __init__(self, config):\n \"\"\"Initialize layers.\"\"\"\n super().__init__(config, {'base_features', 'pool_features'})\n\n # Object context\n self.obj_projector = nn.Sequential(\n nn.Linear(1024 + 300, 512), nn.ReLU(),\n nn.Linear(512, 256)\n )\n self.inner_projector1 
= nn.Sequential(\n nn.Linear(256, 128), nn.ReLU(),\n nn.Linear(128, 128, bias=False)\n )\n self.inner_projector2 = nn.Sequential(\n nn.Linear(256, 128), nn.ReLU(),\n nn.Linear(128, 128)\n )\n self.cntxt_obj_projector = nn.Sequential(\n nn.Linear(256, 128), nn.ReLU(),\n nn.Linear(128, 256)\n )\n # Rel context\n self.rel_projector = nn.Sequential(\n nn.Linear(1024 + 300 + 256 + 1024 + 300 + 256 + 38, 512),\n nn.ReLU(),\n nn.Linear(512, 256)\n )\n self.inner_rel_projector1 = nn.Sequential(\n nn.Linear(256, 128), nn.ReLU(),\n nn.Linear(128, 128, bias=False)\n )\n self.inner_rel_projector2 = nn.Sequential(\n nn.Linear(256, 128), nn.ReLU(),\n nn.Linear(128, 128)\n )\n self.cntxt_rel_projector = nn.Sequential(\n nn.Linear(256, 128), nn.ReLU(),\n nn.Linear(128, 256)\n )\n # Classifiers\n self.fc_classifier = nn.Sequential(\n nn.Linear(256, 256), nn.ReLU(),\n nn.Linear(256, self.num_rel_classes)\n )\n self.fc_classifier_bin = nn.Sequential(\n nn.Linear(256, 128), nn.ReLU(),\n nn.Linear(128, 2)\n )\n\n def contextualize(self, objects, base_features):\n \"\"\"Refine object features using structured motifs.\"\"\"\n obj_feats = self.obj_projector(torch.cat(\n (objects['pool_features'], self.get_obj_embeddings(objects['ids'])),\n dim=1\n ))\n proj1_feats = self.inner_projector1(obj_feats)\n proj2_feats = self.inner_projector2(obj_feats)\n alphas = torch.mm(proj1_feats, proj2_feats.T)\n alphas.fill_diagonal_(0)\n alphas /= (alphas.sum(1).view(-1, 1) + 1e-8)\n # Object context refinement (before obj. classification)\n obj_feats = F.relu(\n obj_feats\n + torch.matmul(alphas, self.cntxt_obj_projector(obj_feats))\n )\n objects['ref_features'] = obj_feats\n return objects\n\n def net_forward(self, base_features, objects, pairs):\n \"\"\"Forward pass, override.\"\"\"\n return self._forward(\n objects['pool_features'][pairs[:, 0]],\n objects['pool_features'][pairs[:, 1]],\n objects['ref_features'][pairs[:, 0]],\n objects['ref_features'][pairs[:, 1]],\n self.get_obj_embeddings(objects['ids'][pairs[:, 0]]),\n self.get_obj_embeddings(objects['ids'][pairs[:, 1]]),\n self.get_spatial_features(\n objects['boxes'][pairs[:, 0]],\n objects['boxes'][pairs[:, 1]]\n )\n )\n\n def _forward(self, subj_feats, obj_feats, ref_subj_feats, ref_obj_feats,\n subj_emb, obj_emb, spat_feats):\n \"\"\"Forward pass, returns output scores.\"\"\"\n # Predicate features\n feats = self.rel_projector(torch.cat(\n (subj_feats, obj_feats, subj_emb, obj_emb, ref_subj_feats,\n ref_obj_feats, spat_feats),\n dim=1\n ))\n proj1_feats = self.inner_rel_projector1(feats)\n proj2_feats = self.inner_rel_projector2(feats)\n alphas = torch.mm(proj1_feats, proj2_feats.T)\n alphas.fill_diagonal_(0)\n alphas /= (alphas.sum(1).view(-1, 1) + 1e-8)\n feats = F.relu(\n feats\n + torch.matmul(alphas, self.cntxt_rel_projector(feats))\n )\n # Classification\n scores = self.fc_classifier(feats)\n scores_bin = self.fc_classifier_bin(feats)\n if self.mode == 'test':\n scores = self.softmax(scores)\n scores_bin = self.softmax(scores_bin)\n return scores, scores_bin\n","repo_name":"deeplab-ai/grounding-consistent-vrd","sub_path":"common/models/sg_generator/hgat_net.py","file_name":"hgat_net.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"28438477738","text":"import angr\nimport claripy\nimport logging\n\nl = logging.getLogger(name=__name__)\n\nclass LinuxLoader(angr.SimProcedure):\n NO_RET = True\n\n # pylint: 
disable=unused-argument,arguments-differ,attribute-defined-outside-init\n local_vars = ('initializers',)\n def run(self):\n self.initializers = self.project.loader.initializers\n self.run_initializer()\n\n def run_initializer(self):\n if len(self.initializers) == 0:\n self.project.simos.set_entry_register_values(self.state)\n self.jump(self.project.entry)\n else:\n addr = self.initializers[0]\n self.initializers = self.initializers[1:]\n self.call(addr, (self.state.posix.argc, self.state.posix.argv, self.state.posix.environ), 'run_initializer')\n\nclass IFuncResolver(angr.SimProcedure):\n NO_RET = True\n local_vars = ('saved_regs',)\n\n # pylint: disable=arguments-differ,unused-argument\n def run(self, funcaddr=None, gotaddr=None, funcname=None):\n self.saved_regs = {reg.name: self.state.registers.load(reg.name) for reg in self.arch.register_list if reg.argument}\n self.call(funcaddr, (), continue_at='after_call')\n\n def after_call(self, funcaddr=None, gotaddr=None, funcname=None):\n value = self.cc.return_val.get_value(self.state)\n for name, val in self.saved_regs.items():\n self.state.registers.store(name, val)\n\n self.state.memory.store(gotaddr, value, endness=self.state.arch.memory_endness)\n self.jump(value)\n\n def __repr__(self):\n return '' % self.kwargs.get('funcname', None)\n","repo_name":"ucsb-seclab/heapster","sub_path":"heapster-env/angr-dev/angr/angr/procedures/linux_loader/sim_loader.py","file_name":"sim_loader.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"81"} +{"seq_id":"22484014792","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n# from django.views.decorators.csrf import csrf_exempt\nfrom scraping.models import NewsItem, TagNews, Lowonganku,KotaLowonganku\nfrom django.db.models import Count, Sum\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n# from .resources import NewsResource\n# import csv\n# from django.contrib.auth.models import User\n\n\n# Create your views here.\n\ndef pencarian(request):\n\t'''Opens the search page'''\n\tif request.method == 'POST':\n\t\tkeyword = request.POST.get('keyword',None)\n\t\treturn render(request, 'pencarian.html', {'keyword' : keyword})\n\telse :\n\t\treturn render(request, 'pencarian.html')\n\ndef loadBerita(request):\n\t'''Fetches news items matching the search\n\tpage : which page to show (page size is set by beritaPerPage)\n\tberitaPerPage : how many news items per page\n\twebsite_name : which website to search within\n\tstartDate : used to filter news from this date onwards\n\tendDate : used to filter news up to this date\n\t'''\n\tpage = request.POST.get('page', 1)\n\tberitaPerPage = 10\n\twebsite_name = request.POST.get('website_name', 'all')\n\tkeyword = request.POST.get('keyword', None)\n\tstartDate = request.POST.get('startDate', None)\n\tendDate = request.POST.get('endDate', None)\n\n\tif startDate : \n\t\tif endDate :\n\t\t# Both start and end dates given\n\t\t\tif website_name == \"all\":\n\t\t\t\t# Start and end dates + all websites\n\t\t\t\tberitalist = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(date__date__gte = startDate).filter(date__date__lte = endDate)[(int(page)-1)*int(beritaPerPage) : int(page)*int(beritaPerPage)]\n\t\t\t\ttotal = 
NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(date__date__gte = startDate).filter(date__date__lte = endDate).count()\n\t\t\telse :\n\t\t\t\t# Start and end dates + a specific website\n\t\t\t\tberitalist = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(website = website_name).filter(date__date__gte = startDate).filter(date__date__lte = endDate)[(int(page)-1)*int(beritaPerPage) : int(page)*int(beritaPerPage)]\n\t\t\t\ttotal = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(website = website_name).filter(date__date__gte = startDate).filter(date__date__lte = endDate).count()\n\t\telse :\n\t\t# Only a start date given\n\t\t\tif website_name == \"all\":\n\t\t\t\t# Only start date + all websites\n\t\t\t\tberitalist = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(date__date__gte = startDate)[(int(page)-1)*int(beritaPerPage) : int(page)*int(beritaPerPage)]\n\t\t\t\ttotal = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(date__date__gte = startDate).count()\n\t\t\telse :\n\t\t\t\t# Only start date + a specific website\n\t\t\t\tberitalist = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(website = website_name).filter(date__date__gte = startDate)[(int(page)-1)*int(beritaPerPage) : int(page)*int(beritaPerPage)]\n\t\t\t\ttotal = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(website = website_name).filter(date__date__gte = startDate).count()\n\telif website_name == \"all\":\n\t\t# No start/end dates + all websites\n\t\tberitalist = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword))[(int(page)-1)*int(beritaPerPage) : int(page)*int(beritaPerPage)]\n\t\ttotal = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).count()\n\telse :\n\t\t# No start/end dates + a specific website\n\t\tberitalist = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(website = website_name)[(int(page)-1)*int(beritaPerPage) : int(page)*int(beritaPerPage)]\n\t\ttotal = NewsItem.objects.filter(judul__iregex=r'\\s{}\\s'.format(keyword)).filter(website = website_name).count()\n\t# beritalist = NewsItem.objects.all()[0:10]\n\t\n\tresult = []\n\ttagresult = []\n\tfor x in beritalist:\n\t\trow = {\n\t\t\t'judul' : x.judul,\n\t\t\t'url' : x.url,\n\t\t\t'date' : x.date,\n\t\t\t'website' : x.website,\n\t\t\t'berita' :x.berita,\n \t\t}\n\t\tresult.append(row)\n\t\ttags = []\n\t\tfor y in x.tags.all():\n\t\t\ttaglist = {\n\t\t\t\t'tags' : y.tag\n\t\t\t}\n\t\t\ttags.append(taglist)\n\t\ttagresult.append(tags)\n\treturn JsonResponse({'tags':tagresult,'beritaList' : result, 'total' : total, 'beritaPerPage' : beritaPerPage, 'website_name' : website_name})\n\n\n# def export(request):\n# person_resource = NewsResource()\n# dataset = person_resource.export()\n# response = HttpResponse(dataset.csv, content_type='text/csv')\n# response['Content-Disposition'] = 'attachment; filename=\"persons.csv\"'\n# return response\n","repo_name":"TrellixVulnTeam/bitalisy_PHB2","sub_path":"pencarian/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73741321864","text":"from pathlib import Path\nfrom unittest.mock import patch\n\nfrom fpdf import FPDF\nfrom ocrmypdf import EncryptedPdfError\nfrom pdfminer.pdfdocument import PDFPasswordIncorrect\n\nfrom 
plagdef.model.models import File, Document\nfrom plagdef.repositories import DocumentFileRepository, PdfReader, FileRepository\n\n\ndef test_list_documents_ignores_pdef_files(tmp_path):\n with (tmp_path / 'doc1.txt').open('w', encoding='utf-8') as f:\n f.write('This is a document.\\n')\n with (tmp_path / 'prep.pdef').open('w', encoding='utf-8') as f:\n f.write('This is a preprocessing file.')\n repo = DocumentFileRepository(tmp_path, lang='en', use_ocr=True)\n docs = repo.list()\n assert len(docs) == 1\n\n\ndef test_list_with_doc_dir_containing_pdf(tmp_path):\n doc1 = FPDF()\n doc1.add_page()\n doc1.set_font('helvetica', size=12)\n doc1.cell(w=0, txt='This is a PDF file containing one sentence.')\n doc1.output(f'{tmp_path}/doc1.pdf')\n with (tmp_path / 'doc2.txt').open('w', encoding='utf-8') as f:\n f.write('This also is a document.\\n')\n repo = DocumentFileRepository(tmp_path, lang='en', use_ocr=True)\n docs = repo.list()\n assert 'This is a PDF file containing one sentence.' in [doc.text for doc in docs]\n assert 'This also is a document.\\n' in [doc.text for doc in docs]\n\n\ndef test_list_with_doc_dir_containing_pdf_with_uppercase_file_suffix(tmp_path):\n doc1 = FPDF()\n doc1.add_page()\n doc1.set_font('helvetica', size=12)\n doc1.cell(w=0, txt='This is a PDF file containing one sentence.')\n doc1.output(f'{tmp_path}/doc1.PDF')\n with (tmp_path / 'doc2.txt').open('w', encoding='utf-8') as f:\n f.write('This also is a document.\\n')\n repo = DocumentFileRepository(tmp_path, lang='en', use_ocr=True)\n docs = repo.list()\n assert 'This is a PDF file containing one sentence.' in [doc.text for doc in docs]\n assert 'This also is a document.\\n' in [doc.text for doc in docs]\n\n\ndef test_list_with_doc_dir_containing_pdf_with_no_text(tmp_path):\n doc1 = FPDF()\n doc1.add_page()\n doc1.output(f'{tmp_path}/doc1.pdf')\n with (tmp_path / 'doc2.txt').open('w', encoding='utf-8') as f:\n f.write('This also is a document.\\n')\n repo = DocumentFileRepository(tmp_path, lang='en', use_ocr=True)\n docs = repo.list()\n assert len(docs) == 2\n assert 'This also is a document.\\n' in [doc.text for doc in docs]\n\n\ndef test_create_doc_with_file_in_subdir(tmp_path):\n doc_repo = DocumentFileRepository(tmp_path)\n # This file is located in tmp_path/sub/dir/doc.txt\n file_path = Path(f\"{tmp_path}/sub/dir/doc.txt\")\n file = File(file_path, \"Hello World!\", False)\n doc = doc_repo._create_doc(file)\n assert doc == Document(\"doc\", str(file_path), \"Hello World!\")\n assert doc.path == str(file_path)\n\n\n@patch.object(PdfReader, 'extract_text', return_value=\"Hello World!\")\n@patch.object(PdfReader, 'extract_urls', return_value=None)\ndef test_create_doc_if_extract_urls_returns_none(pdf_mock, tmp_path):\n doc_repo = DocumentFileRepository(tmp_path)\n file_path = Path(f\"{tmp_path}/sub/dir/doc.pdf\")\n file = File(file_path, b\"Hello World!\", True)\n doc = doc_repo._create_doc(file)\n assert doc.urls == set()\n\n\n@patch.object(FileRepository, 'save_all')\ndef test_save_all(repo_save, tmp_path):\n doc1 = Document('doc1', f'{tmp_path}/doc1.txt', 'This is an English document.')\n doc_repo = DocumentFileRepository(tmp_path)\n doc_repo.save_all({doc1})\n repo_save.assert_called_with({File(Path(doc1.path), doc1.text, False)})\n\n\n@patch.object(FileRepository, 'remove_all')\ndef test_remove_all(repo_rem, tmp_path):\n doc1 = Document('doc1', f'{tmp_path}/doc1.txt', 'This is an English document.')\n doc_repo = DocumentFileRepository(tmp_path)\n doc_repo.remove_all({doc1})\n 
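# (editor aside, hedged sketch) patch.object swaps FileRepository.remove_all for
# a MagicMock, so the assertion below only checks delegation: that the document
# repository converts Document objects back into File records before handing
# them down. An equivalent context-manager form of the same pattern would be:
#   with patch.object(FileRepository, 'remove_all') as repo_rem:
#       DocumentFileRepository(tmp_path).remove_all({doc1})
#       repo_rem.assert_called_once()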
repo_rem.assert_called_with({File(Path(doc1.path), doc1.text, False)})\n\n\ndef test_pdf_reader_poor_extraction():\n text = 'Ein w(cid:246)rtlicher Match.'\n reader = PdfReader(None, lang='de', use_ocr=True)\n assert reader._poor_extraction(text)\n\n\ndef test_pdf_reader_poor_extraction_mark_umlaut():\n text = 'Ein w¨ortlicher Match.'\n reader = PdfReader(None, lang='de', use_ocr=True)\n assert reader._poor_extraction(text)\n\n\ndef test_pdf_reader_poor_extraction_ff():\n text = 'Eine fehlerhafte Veröffentlichung.'\n reader = PdfReader(None, lang='de', use_ocr=True)\n assert reader._poor_extraction(text)\n\n\ndef test_pdf_reader_poor_extraction_very_long_word():\n text = 'TheseWordsarewrongfullymergedtogetherduetoextractionproblems.'\n reader = PdfReader(None, lang='en', use_ocr=True)\n assert reader._poor_extraction(text)\n\n\ndef test_pdf_reader_poor_extraction_url():\n text = ' https://www.treatwell.de/partners/inspiration/blog/5-tipps-ihr-team-zu-motivieren'\n reader = PdfReader(None, lang='en', use_ocr=True)\n assert not reader._poor_extraction(text)\n\n\ndef test_pdf_reader_poor_extraction_no_text():\n text = ' '\n reader = PdfReader(None, lang='en', use_ocr=True)\n assert reader._poor_extraction(text)\n\n\ndef test_pdf_reader_poor_extraction_with_correct_text():\n text = 'This is flawless.'\n reader = PdfReader(None, lang='en', use_ocr=True)\n assert not reader._poor_extraction(text)\n\n\ndef test_pdf_reader_merges_hyphenated_words_at_line_end(tmp_path):\n doc1 = FPDF()\n doc1.add_page()\n doc1.set_font('helvetica', size=12)\n doc1.cell(txt=\"This is a PDF file con-\", ln=1)\n doc1.cell(txt=\"taining one sentence. However there are mul- \", ln=1)\n doc1.cell(txt=\"tiple line breaks which split words.\")\n doc1.output(f'{tmp_path}/doc1.pdf')\n reader = PdfReader(tmp_path / 'doc1.pdf', lang='en', use_ocr=True, )\n text = reader._extract()\n assert text == 'This is a PDF file containing one sentence. 
However there are multiple line breaks' \\\n ' which split words.'\n\n\n@patch(\"pdfplumber.open\", side_effect=UnicodeDecodeError(\"\", bytes(), -1, -1, \"\"))\ndef test_pdf_reader_extract_urls_returns_none_on_unicode_decode_error(pdf_mock, tmp_path):\n reader = PdfReader(tmp_path, lang='eng', use_ocr=True)\n urls = reader.extract_urls()\n assert not urls\n\n\n@patch.object(PdfReader, 'extract_text', side_effect=EncryptedPdfError())\ndef test_create_doc_extract_text_ignores_encrypted_pdf_error(extract_mock, tmp_path):\n doc_repo = DocumentFileRepository(tmp_path, use_ocr=True)\n file_path = Path(f\"{tmp_path}/sub/dir/doc.pdf\")\n file = File(file_path, \"Hello World!\", False)\n doc_repo._create_doc(file)\n\n\n@patch.object(PdfReader, 'extract_text', side_effect=PDFPasswordIncorrect())\ndef test_create_doc_extract_text_ignores_pdf_password_incorrect_error(extract_mock, tmp_path):\n doc_repo = DocumentFileRepository(tmp_path, use_ocr=True)\n file_path = Path(f\"{tmp_path}/sub/dir/doc.pdf\")\n file = File(file_path, \"Hello World!\", False)\n doc_repo._create_doc(file)\n","repo_name":"devWhyqueue/plagdef","sub_path":"plagdef/tests/repositories/test_document_file_repository.py","file_name":"test_document_file_repository.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21938028266","text":"#Conference\r\n#Garnet Liu\r\nfrom graphics import *\r\nimport random\r\nimport time\r\nimport math\r\n\r\nimport os\r\nos.chdir('img')\r\n\r\n#dialougeBox\r\n#--------------------------------------------------------------------\r\n#BUTTON\r\n\r\nclass Button:\r\n\r\n def __init__(self, win, shape, color, label):\r\n self.shape = shape\r\n self.shape.setFill(color)\r\n self.shape.draw(win)\r\n if label != None:\r\n self.text = Text(shape.getCenter(), label)\r\n self.text.draw(win)\r\n self.color = color\r\n\r\n def press(self):\r\n self.shape.setFill(\"gray\")\r\n time.sleep(0.2)\r\n self.shape.setFill(self.color)\r\n\r\n def undraw(self):\r\n self.shape.undraw()\r\n self.text.undraw()\r\n\r\n#--------------------------------------------------------------------\r\n\r\nclass ArrowButton(Button):\r\n\r\n def __init__(self, win, p1, p2, p3, pl, pr, color):\r\n poly = Polygon(p1, p2, p3)\r\n Button.__init__(self, win, poly, color, None)\r\n self.position = Rectangle(pl, pr)\r\n self.pl = pl\r\n self.pr = pr\r\n\r\n def contains(self, point):\r\n x = point.getX()\r\n y = point.getY()\r\n leastX = self.pl.getX()\r\n greatestX = self.pr.getX()\r\n leastY = self.pl.getY()\r\n greatestY = self.pr.getY()\r\n if leastX <= x <= greatestX and leastY <= y <= greatestY:\r\n return True\r\n else:\r\n return False\r\n \r\n\r\n#-------------------------------------------------------------------\r\n\r\nclass SquareButton(Button):\r\n\r\n def __init__(self, win, x, y, size, color, label):\r\n self.x1 = x - size/2\r\n self.x2 = x + size/2\r\n self.y1 = y - size/2\r\n self.y2 = y + size/2\r\n upperLeft = Point(self.x1, self.y1)\r\n lowerRight = Point(self.x2, self.y2)\r\n rect = Rectangle(upperLeft, lowerRight)\r\n Button.__init__(self, win, rect, color, label)\r\n\r\n def contains(self, point):\r\n x = point.getX()\r\n y = point.getY()\r\n if self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2:\r\n return True\r\n else:\r\n return False\r\n\r\n#---------------------------------------------------------------\r\n\r\nclass Password:\r\n def __init__(self,p,n):\r\n self.x = p.getX()\r\n self.y = p.getY()\r\n self.key = 
Circle(p, .5)\r\n self.key.setFill(\"wheat\")\r\n self.keyText = Text(p, n)\r\n\r\n def todraw(self, win):\r\n self.key.draw(win)\r\n self.keyText.draw(win)\r\n\r\n def press(self):\r\n self.key.setFill(\"gray\")\r\n time.sleep(0.2)\r\n self.key.setFill(\"wheat\") \r\n\r\n def contains(self, point):\r\n x = point.getX()\r\n y = point.getY()\r\n distance = math.sqrt((x - self.x)**2 + (y - self.y)**2)\r\n if distance <= .5:\r\n return True\r\n else:\r\n return False\r\n def undraw(self):\r\n self.key.undraw()\r\n self.keyText.undraw()\r\n\r\n\r\n#---------------------------------------------------------------\r\n#Stuff\r\n\r\n#objects\r\nclass Objects:\r\n def __init__(self):\r\n #set up objects for draw and image location\r\n self.imgStimulant = Image(Point(2, 2.5), \"redpillbottle.gif\")\r\n self.imgStimulant.p = Point(2, 2.5)\r\n \r\n self.imgTranquilizer = Image(Point(2.2, 2), \"bluepillbottle.gif\")\r\n self.imgTranquilizer.p = Point(2.2, 2)\r\n \r\n self.imgCup = Image(Point(3.5, 2.5), \"cup.gif\")\r\n self.imgCup.p = Point(3.5, 2.5)\r\n\r\n self.imgCupE = Image(Point(3.5, 2.5), \"cupempty.gif\")\r\n self.imgCupE.p = Point(3.5, 2.5)\r\n \r\n self.imgKey2H = Image(Point(5, 2.5), \"goldkey.gif\")\r\n self.imgKey2H.p = Point(5, 2.5)\r\n \r\n self.imgKey2R3 = Image(Point(4, .6), \"bluekey.gif\")\r\n self.imgKey2R3.p = Point(4, .6)\r\n\r\n self.imgPhone = Image(Point(4, 1.9), \"cellphone.gif\")\r\n self.imgPhone.p = Point(4, 1.9)\r\n\r\n #unpickable item\r\n self.imgLetter = Image(Point(4, 3), \"letter.gif\")\r\n self.imgMedical = Image(Point(4,3),\"medicalhistory.gif\")\r\n self.imgRedPill = Image(Point(7,.7),\"redpill.gif\")\r\n self.imgBluePill = Image(Point(1.2,.7),\"bluepill.gif\")\r\n self.imgDiary = Image(Point(4, 3), \"diary.gif\")\r\n self.imgMedical2 = Image(Point(4,3),\"medicalhistory2.gif\")\r\n self.imgBrainJar = Image(Point(4,3),\"brain.gif\")\r\n\r\n\r\n \r\n self.allObjects = [self.imgStimulant,self.imgTranquilizer,\r\n self.imgCup,self.imgCupE,\r\n self.imgKey2H,self.imgKey2R3,\r\n self.imgPhone,\r\n self.imgLetter,self.imgMedical,\r\n self.imgRedPill,self.imgBluePill,\r\n self.imgDiary,\r\n self.imgMedical2,self.imgBrainJar]\r\n self.noNeed2Draw = [self.imgKey2H, self.imgKey2R3,\r\n self.imgRedPill,self.imgBluePill]\r\n\r\n#THE SHELF\r\nclass Shelf(Objects):\r\n def __init__(self):\r\n Objects.__init__(self)\r\n self.bposition = [Point(8.5, 4.5),Point(9.5, 4.5),Point(10.5, 4.5),\r\n Point(8.5, 3.5),Point(9.5, 3.5),Point(10.5, 3.5),\r\n Point(8.5, 2.5),Point(9.5, 2.5),Point(10.5, 2.5)]\r\n self.checkin = [False, False, False,\r\n False, False, False,\r\n False, False, False]\r\n #textbox\r\n self.textBox = Rectangle(Point(.5, 2), Point(7.5, .5))\r\n self.textBox.setFill(\"black\")\r\n self.message = \"\"\r\n self.text = Text(Point(4, 1.25), self.message)\r\n self.text.setFill(\"White\")\r\n self.text.setSize(20)\r\n\r\n\r\n\r\n def place(self, obj):\r\n for i in range(9):\r\n if self.checkin[i] == False:\r\n self.checkin[i] = obj\r\n dx = self.bposition[i].getX() - obj.p.getX()\r\n dy = self.bposition[i].getY() - obj.p.getY()\r\n obj.move(dx, dy)\r\n break\r\n\r\n def match(self,win, ButtonNum):\r\n #stimulant\r\n if self.checkin[ButtonNum] == self.allObjects[0]:\r\n self.updateText(win, \"Stimulant...Not for me...where to use?\")\r\n p = win.getMouse()\r\n if enterR(p,6,8,0,6) == True:\r\n return \"stimulant\"\r\n else:\r\n return \"\"\r\n \r\n #tranquilizer\r\n if self.checkin[ButtonNum] == self.allObjects[1]:\r\n self.updateText(win, \"Tranquilizer...Not for me...where 
to use?\")\r\n p = win.getMouse()\r\n if enterR(p,0,4.2,0,6) == True:\r\n return \"tranquilizer\"\r\n else:\r\n return \"\" \r\n\r\n \r\n #cup\r\n if self.checkin[ButtonNum] == self.allObjects[2]:\r\n self.updateText(win, \"Where to use the cup?\")\r\n p = win.getMouse()\r\n if enterR(p,4,6,4,5) == True:\r\n return \"spill\"\r\n elif self.decisionText(win, \"Drink it?\") == \"Y\":\r\n self.updateText(win, \"It's poisonous!\")\r\n return \"deadEnd\"\r\n else:\r\n return \"\"\r\n \r\n\r\n #cup empty\r\n if self.checkin[ButtonNum] == self.allObjects[3]:\r\n self.updateText(win, \"An empty cup...\")\r\n return \"\"\r\n \r\n \r\n #key2Hall\r\n if self.checkin[ButtonNum] == self.allObjects[4]:\r\n self.updateText(win, \"To use key, click the door\")\r\n p = win.getMouse()\r\n if enterR(p,5,7,1,4) == True:\r\n return \"go2Hall\"\r\n else:\r\n return \"\"\r\n #key2R3\r\n if self.checkin[ButtonNum] == self.allObjects[5]:\r\n self.updateText(win, \"To use key, click the door\")\r\n p = win.getMouse()\r\n if enterR(p,2.5,5.5,0,6) == True:\r\n return \"go2Room3\"\r\n else:\r\n return \"\"\r\n\r\n #cellphone\r\n if self.checkin[ButtonNum] == self.allObjects[6]:\r\n self.updateText(win, \"Power is on.\")\r\n if self.allObjects[11] in self.noNeed2Draw:\r\n if self.decisionText(win, \"There is signal.'Mary' is in the call log...should I call?\") == \"Y\":\r\n return \"end2\"\r\n\r\n\r\n\r\n else:\r\n return \"\"\r\n\r\n def updateText(self, win, msg):\r\n self.textBox.draw(win)\r\n self.text.draw(win)\r\n self.text.setText(msg)\r\n p = win.getMouse()\r\n self.text.undraw()\r\n self.text.setText(\"\")\r\n self.textBox.undraw()\r\n\r\n def decisionText(self, win, msg):\r\n self.textBox.draw(win)\r\n self.text.draw(win)\r\n self.text.setText(msg)\r\n self.yes = SquareButton(win, 7.375, 1.625, .75, \"green\", \"YES\")\r\n self.no = SquareButton(win, 7.375, .875, .75, \"red\", \"NO\")\r\n d = \"\"\r\n while d == \"\":\r\n p = win.getMouse()\r\n if self.yes.contains(p) == True:\r\n self.yes.press()\r\n d = \"Y\"\r\n elif self.no.contains(p) == True:\r\n self.no.press()\r\n d = \"N\"\r\n self.text.undraw()\r\n self.text.setText(\"\")\r\n self.textBox.undraw()\r\n self.yes.undraw()\r\n self.no.undraw()\r\n return d\r\n \r\n \r\n#---------------------------------------------------------------\r\n#MAP\r\n \r\n#room->set up in game(),setDirections\r\n\r\nclass BaseRoom:\r\n \r\n def __init__(self, name):\r\n self.name = name\r\n self.up = None\r\n self.down = None\r\n self.left = None\r\n self.right= None\r\n\r\n\r\n def __str__(self):\r\n return self.name\r\n \r\n def setDirections(self, u, d, l, r):\r\n self.up = u\r\n self.down = d\r\n self.left = l\r\n self.right = r\r\n\r\n\r\n\r\n\r\n \r\n\r\n#rooms\r\n\r\nclass BlackR(BaseRoom, Objects):\r\n\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Fin\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"black\")\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n def undraw(self):\r\n self.wall.undraw() \r\n def getObject(self,p):\r\n None\r\n\r\nclass RedR(BaseRoom, Objects):\r\n\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Fin\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"red\")\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n def undraw(self):\r\n self.wall.undraw() \r\n def getObject(self,p):\r\n None\r\n\r\nclass BlueR(BaseRoom, Objects):\r\n\r\n def __init__(self):\r\n 
BaseRoom.__init__(self, \"Fin\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"light cyan\")\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n def undraw(self):\r\n self.wall.undraw() \r\n def getObject(self,p):\r\n None\r\n\r\n\r\nclass Room1F(BaseRoom, Objects):\r\n\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 1\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"beige\")\r\n self.floor = Polygon(Point(0,0), Point(8,0), Point(7,1), Point(1,1))\r\n self.floor.setFill(\"mint cream\")\r\n self.frontWall = Polygon(Point(1,1), Point(7,1), Point(7,6), Point(1,6))\r\n self.door2hall = Polygon(Point(5,1), Point(7,1), Point(7,4), Point(5,4))\r\n self.door2hall.setFill(\"gray\")\r\n\r\n\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.frontWall.draw(win)\r\n self.door2hall.draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.frontWall.undraw()\r\n self.door2hall.undraw()\r\n\r\n \r\n def getObject(self,p):\r\n None\r\n\r\n\r\n\r\n \r\n\r\n\r\nclass Room1L(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 1\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"beige\")\r\n self.floor = Polygon(Point(0,0), Point(8,0), Point(7,1), Point(1,1))\r\n self.floor.setFill(\"mint cream\")\r\n self.frontWall = Polygon(Point(1,1), Point(7,1), Point(7,6), Point(1,6))\r\n #furniture\r\n self.imgDesk = Image(Point(2.5, 1.19), \"desk.gif\")\r\n \r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.frontWall.draw(win)\r\n self.imgDesk.draw(win)\r\n \r\n #objects\r\n #stilmulant\r\n if self.allObjects[0] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[0].draw(win)\r\n #cup\r\n if self.allObjects[2] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[2].draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.frontWall.undraw()\r\n self.imgDesk.undraw()\r\n\r\n #objects\r\n #stimulant\r\n if self.allObjects[0] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[0].undraw()\r\n #cup\r\n if self.allObjects[2] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[2].undraw()\r\n\r\n \r\n def getObject(self,p):\r\n #stimulant\r\n if enterR(p,1.5,2.5,2,3):\r\n return self.allObjects[0]\r\n #cup\r\n if enterR(p,3,4,2,3):\r\n return self.allObjects[2]\r\n else:\r\n return None\r\n\r\n\r\n\r\nclass Room1R(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 1\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"beige\")\r\n self.floor = Polygon(Point(0,0), Point(8,0), Point(7,1), Point(1,1))\r\n self.floor.setFill(\"mint cream\") \r\n self.frontWall = Polygon(Point(1,1), Point(7,1), Point(7,6), Point(1,6))\r\n #furniture\r\n self.imgMirror = Image(Point(5, 3.5), \"mirrorSnot.gif\")\r\n\r\n\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.frontWall.draw(win)\r\n self.imgMirror.draw(win)\r\n #objects\r\n #key\r\n if self.allObjects[4] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[4].draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.frontWall.undraw()\r\n self.imgMirror.undraw()\r\n\r\n #objects\r\n 
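# (editor aside, hedged note) The rooms share one inventory convention: every
# pickable item's draw/undraw is guarded by the Objects.noNeed2Draw list, so an
# item present in that list is treated as picked up and is skipped when the
# room renders (the pickup loop that appends to the list sits outside this
# excerpt). Minimal form of the guard used throughout:
#   if obj not in self.noNeed2Draw:  # still lying in the room
#       obj.draw(win)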
#key\r\n if self.allObjects[4] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[4].undraw()\r\n\r\n def getObject(self,p):\r\n #key\r\n if enterR(p,4.5,5.5,2.25,2.75):\r\n return self.allObjects[4]\r\n else:\r\n return None\r\n\r\n\r\nclass Room1B(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 1\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"beige\")\r\n self.floor = Polygon(Point(0,0), Point(8,0), Point(7,1), Point(1,1))\r\n self.floor.setFill(\"mint cream\")\r\n self.frontWall = Polygon(Point(1,1), Point(7,1), Point(7,6), Point(1,6))\r\n #furniture\r\n self.imgBed = Image(Point(3.5, .795), \"bed.gif\")\r\n #decor\r\n self.patch = Rectangle(Point(4, 4), Point(6, 5))\r\n self.patch.setFill(\"bisque2\")\r\n self.patch.setOutline(\"bisque2\")\r\n self.password = Text(Point(5,4.5), \"Isolation Room 3\")\r\n self.password.setSize(24)\r\n self.password.setFill(\"red\")\r\n \r\n \r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.frontWall.draw(win)\r\n self.imgBed.draw(win)\r\n self.patch.draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.frontWall.undraw()\r\n self.imgBed.undraw()\r\n self.patch.undraw()\r\n self.password.undraw()\r\n\r\n def getObject(self,p):\r\n None\r\n\r\n def spill(self,win):\r\n self.password.draw(win)\r\n\r\nclass Room1Rm(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 1\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"beige\")\r\n self.imgMirrorL = Image(Point(4, 3), \"mirrorLnot.gif\")\r\n\r\n \r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.imgMirrorL.draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.imgMirrorL.undraw()\r\n\r\n def getObject(self,p):\r\n None\r\n\r\n\r\nclass HallF(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Hall\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"white\")\r\n self.floor = Polygon(Point(0,0), Point(3,3), Point(5,3), Point(8,0))\r\n self.floor.setFill(\"mint cream\")\r\n self.door2 = Polygon(Point(7,1), Point(6,2), Point(6,6), Point(7,5))\r\n self.door2.setFill(\"gray\")\r\n self.door1 = Polygon(Point(1,1), Point(2,2), Point(2,6), Point(1,5))\r\n self.door1.setFill(\"gray\")\r\n self.door3 = Polygon(Point(3,3), Point(5,3), Point(5,6), Point(3,6))\r\n self.door3.setFill(\"SkyBlue4\")\r\n \r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.door1.draw(win)\r\n self.door2.draw(win)\r\n self.door3.draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.door1.undraw()\r\n self.door2.undraw()\r\n self.door3.undraw()\r\n\r\n def getObject(self,p):\r\n None\r\n\r\nclass HallR(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Hall\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"white\")\r\n self.imgOpenDoor = Image(Point(4,3),\"dooropen.gif\")\r\n self.imgCloseDoor = Image(Point(4,3),\"doorclose.gif\")\r\n \r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.imgCloseDoor.draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.imgCloseDoor.undraw()\r\n \r\n def getObject(self,p):\r\n None\r\n\r\nclass HallL(BaseRoom, 
Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Hall\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"white\")\r\n self.imgOpenDoor = Image(Point(4,3),\"dooropen.gif\")\r\n self.imgCloseDoor = Image(Point(4,3),\"doorclose.gif\")\r\n \r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.imgCloseDoor.draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.imgCloseDoor.undraw()\r\n \r\n def getObject(self,p):\r\n None\r\n\r\nclass HallZ(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Hall\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"white\")\r\n self.door = Rectangle(Point(1,0), Point(7,6))\r\n self.door.setFill(\"SkyBlue4\")\r\n self.text = Text(Point(4,5.5),\"LABORATORY NO.1 -- order matters\")\r\n self.text.setFill(\"red\")\r\n self.text.setSize(35)\r\n self.box = Rectangle(Point(2.5,4.5),Point(5.5,5.25))\r\n self.box.setFill(\"white\")\r\n self.boxText = Text(Point(4,4.875),\"\")\r\n self.boxText.setSize(35)\r\n \r\n self.code = \"\"\r\n self.key1 = Password(Point(3,4),\"1\")\r\n self.key2 = Password(Point(4,4),\"2\")\r\n self.key3 = Password(Point(5,4),\"3\")\r\n self.key4 = Password(Point(3,3),\"4\")\r\n self.key5 = Password(Point(4,3), \"5\")\r\n self.key6 = Password(Point(5,3),\"6\")\r\n self.key7 = Password(Point(3,2),\"7\")\r\n self.key8 = Password(Point(4,2),\"8\")\r\n self.key9 = Password(Point(5,2),\"9\")\r\n \r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.door.draw(win)\r\n self.text.draw(win)\r\n self.box.draw(win)\r\n self.boxText.draw(win)\r\n self.key1.todraw(win)\r\n self.key2.todraw(win)\r\n self.key3.todraw(win)\r\n self.key4.todraw(win)\r\n self.key5.todraw(win)\r\n self.key6.todraw(win)\r\n self.key7.todraw(win)\r\n self.key8.todraw(win)\r\n self.key9.todraw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.door.undraw()\r\n self.text.undraw()\r\n self.box.undraw()\r\n self.boxText.undraw()\r\n self.key1.undraw()\r\n self.key2.undraw()\r\n self.key3.undraw()\r\n self.key4.undraw()\r\n self.key5.undraw()\r\n self.key6.undraw()\r\n self.key7.undraw()\r\n self.key8.undraw()\r\n self.key9.undraw()\r\n \r\n def getObject(self,p):\r\n None\r\n\r\n\r\n\r\nclass Room2(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 2\")\r\n Objects.__init__(self)\r\n #background\r\n self.imgRoom2 = Image(Point(4, 3), \"hospitalroom.gif\")\r\n \r\n def todraw(self, win):\r\n self.imgRoom2.draw(win)\r\n #objects\r\n #tranquilizer\r\n if self.allObjects[1] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[1].draw(win)\r\n\r\n def undraw(self):\r\n self.imgRoom2.undraw()\r\n #tranquilizer\r\n if self.allObjects[1] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[1].undraw()\r\n\r\n def getObject(self,p):\r\n #tranquilizer\r\n if enterR(p,1.7,2.7,1.5,2.5):\r\n return self.allObjects[1]\r\n\r\nclass Room2Z(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 2\")\r\n Objects.__init__(self)\r\n #background\r\n self.imgRoom2Z = Image(Point(4, 3), \"room2Z2.gif\")\r\n\r\n \r\n def todraw(self, win):\r\n self.imgRoom2Z.draw(win)\r\n #key2R3\r\n if self.allObjects[5] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[5].draw(win)\r\n #tranquilizer/stimulant\r\n if self.allObjects[9] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[9].draw(win)\r\n if self.allObjects[10] 
in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[10].draw(win)\r\n\r\n\r\n\r\n def undraw(self):\r\n self.imgRoom2Z.undraw()\r\n self.allObjects[9].undraw()\r\n self.allObjects[10].undraw()\r\n #key2R3\r\n if self.allObjects[5] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[5].undraw()\r\n\r\n\r\n def getObject(self,p):\r\n #key2R3\r\n if enterR(p,3,5,0,1):\r\n return self.allObjects[5]\r\n\r\n\r\nclass Room3F(BaseRoom, Objects):\r\n\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 3\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"IndianRed4\")\r\n self.floor = Polygon(Point(0,0), Point(8,0), Point(7,1), Point(1,1))\r\n self.floor.setFill(\"steel blue\")\r\n self.frontWall = Polygon(Point(1,1), Point(7,1), Point(7,6), Point(1,6))\r\n self.door2hall = Rectangle(Point(1,1), Point(3,4))\r\n self.door2hall.setFill(\"gray\")\r\n\r\n\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.frontWall.draw(win)\r\n self.door2hall.draw(win)\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.frontWall.undraw()\r\n self.door2hall.undraw()\r\n\r\n \r\n def getObject(self,p):\r\n None\r\n\r\nclass Room3L(BaseRoom, Objects):\r\n\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 3\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"IndianRed4\")\r\n self.floor = Polygon(Point(0,0), Point(8,0), Point(7,1), Point(1,1))\r\n self.floor.setFill(\"steel blue\")\r\n self.frontWall = Polygon(Point(1,1), Point(7,1), Point(7,6), Point(1,6))\r\n\r\n\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.frontWall.draw(win)\r\n\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.frontWall.undraw()\r\n\r\n\r\n \r\n def getObject(self,p):\r\n None\r\n\r\nclass Room3B(BaseRoom, Objects):\r\n\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 3\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"IndianRed4\")\r\n self.floor = Polygon(Point(0,0), Point(8,0), Point(7,1), Point(1,1))\r\n self.floor.setFill(\"steel blue\")\r\n self.frontWall = Polygon(Point(1,1), Point(7,1), Point(7,6), Point(1,6))\r\n self.imgCab = Image(Point(4, 1.5), \"cabinet.gif\")\r\n self.imgFam = Image(Point(3, 3), \"familypic.gif\")\r\n\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.frontWall.draw(win)\r\n self.imgCab.draw(win)\r\n self.imgFam.draw(win)\r\n\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.frontWall.undraw()\r\n self.imgCab.undraw()\r\n self.imgFam.undraw()\r\n\r\n\r\n \r\n def getObject(self,p):\r\n None\r\n\r\n\r\nclass Room3R(BaseRoom, Objects):\r\n\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 3\")\r\n Objects.__init__(self)\r\n self.wall = Polygon(Point(0,0), Point(8,0), Point(8,6), Point(0,6))\r\n self.wall.setFill(\"IndianRed4\")\r\n self.floor = Polygon(Point(0,0), Point(8,0), Point(7,1), Point(1,1))\r\n self.floor.setFill(\"steel blue\")\r\n self.frontWall = Polygon(Point(1,1), Point(7,1), Point(7,6), Point(1,6))\r\n self.imgSofa = Image(Point(3.2, 1.34), \"sofa.gif\")\r\n\r\n def todraw(self, win):\r\n self.wall.draw(win)\r\n self.floor.draw(win)\r\n self.frontWall.draw(win)\r\n self.imgSofa.draw(win)\r\n #cellphone\r\n if self.allObjects[6] in 
self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[6].draw(win)\r\n\r\n\r\n\r\n def undraw(self):\r\n self.wall.undraw()\r\n self.floor.undraw()\r\n self.frontWall.undraw()\r\n self.imgSofa.undraw()\r\n #cellphone\r\n if self.allObjects[6] in self.noNeed2Draw:\r\n pass\r\n else:\r\n self.allObjects[6].undraw()\r\n\r\n\r\n \r\n def getObject(self,p):\r\n #cellphone\r\n if enterR(p,3.5,4.5,1.4,2.4):\r\n return self.allObjects[6]\r\n\r\nclass Room4(BaseRoom, Objects):\r\n def __init__(self):\r\n BaseRoom.__init__(self, \"Room 4\")\r\n Objects.__init__(self)\r\n #background\r\n self.imgRoom4 = Image(Point(4, 3), \"laboratory.gif\")\r\n self.imgBrain = Image(Point(2.7, 2.2), \"brainicon.gif\")\r\n \r\n def todraw(self, win):\r\n self.imgRoom4.draw(win)\r\n self.imgBrain.draw(win)\r\n\r\n def undraw(self):\r\n self.imgRoom4.undraw()\r\n self.imgBrain.undraw()\r\n\r\n\r\n def getObject(self,p):\r\n None\r\n\r\n\r\n#------------------------------------------------------------\r\n#motion\r\n \r\ndef enterR(p,leastX,greatestX,leastY,greatestY):\r\n if leastX <= p.getX() <= greatestX and leastY <= p.getY() <= greatestY:\r\n return True\r\n else:\r\n return False\r\n \r\n\r\n#----------------------------------------------------------\r\n#Gamepad\r\n\r\n\r\ndef game():\r\n #win\r\n win = GraphWin(\"\",1100,600)\r\n win.setCoords(0.0, 0.0, 11.0, 6.0)\r\n #background\r\n sepLine = Line(Point(8, 0), Point(8, 6))\r\n sepLine.draw(win)\r\n arrowB = Rectangle(Point(8, 0), Point(11, 2))\r\n arrowB.setFill(\"lavender\")\r\n arrowB.draw(win)\r\n #Indicator\r\n display = Text(Point(9,5.5), \"\")\r\n display.setSize(24)\r\n display.draw(win)\r\n\r\n #shelf\r\n shelf = Shelf()\r\n \r\n #buttons\r\n exit = SquareButton(win, 10.5, 5.5, 1, \"red\", \"QUIT\")\r\n uButton = ArrowButton(win, Point(9, 1), Point(10, 1), Point(9.5, 2),Point(9, 1), Point(10, 2),\"cornsilk\")\r\n lButton = ArrowButton(win, Point(8, .5), Point(9, 0), Point(9, 1), Point(8, 0), Point(9, 2),\"cornsilk\")\r\n rButton = ArrowButton(win, Point(10, 0), Point(11, .5), Point(10, 1), Point(10, 0), Point(11, 2),\"cornsilk\")\r\n dButton = ArrowButton(win, Point(9,1), Point(9.5, 0), Point(10, 1), Point(9, 0), Point(10, 1),\"cornsilk\")\r\n\r\n item1 = SquareButton(win, 8.5, 4.5, 1, \"white\", None)\r\n item2 = SquareButton(win, 9.5, 4.5, 1, \"white\", None)\r\n item3 = SquareButton(win, 10.5, 4.5, 1, \"white\", None)\r\n item4 = SquareButton(win, 8.5, 3.5, 1, \"white\", None)\r\n item5 = SquareButton(win, 9.5, 3.5, 1, \"white\", None)\r\n item6 = SquareButton(win, 10.5, 3.5, 1, \"white\", None)\r\n item7 = SquareButton(win, 8.5, 2.5, 1, \"white\", None)\r\n item8 = SquareButton(win, 9.5, 2.5, 1, \"white\", None)\r\n item9 = SquareButton(win, 10.5, 2.5, 1, \"white\", None)\r\n \r\n #set up rooms\r\n room1F = Room1F()\r\n room1B = Room1B()\r\n room1L = Room1L()\r\n room1R = Room1R()\r\n room1Rm = Room1Rm()\r\n hallF = HallF()\r\n hallR = HallR()\r\n hallL = HallL()\r\n hallZ = HallZ()\r\n room2 = Room2()\r\n room2Z = Room2Z()\r\n room3F = Room3F()\r\n room3L = Room3L()\r\n room3B = Room3B()\r\n room3R = Room3R()\r\n room4 = Room4()\r\n\r\n redR = RedR()\r\n blueR = BlueR()\r\n blackR = BlackR()\r\n \r\n \r\n #set up connections between rooms\r\n room1F.setDirections(None, None, room1L, room1R)\r\n room1L.setDirections(None, None, room1B, room1F)\r\n room1B.setDirections(None, None, room1R, room1L)\r\n room1R.setDirections(None, None, room1F, room1B)\r\n room1Rm.setDirections(None,room1R, room1R, room1R)\r\n hallF.setDirections(hallZ, room1B, hallL, 
hallR)\r\n hallR.setDirections(room2, hallF, hallF, hallF)\r\n hallL.setDirections(None, hallF, hallF, hallF)\r\n hallZ.setDirections(None, hallF, None, None)\r\n room2.setDirections(None, hallF, None, None)\r\n room2Z.setDirections(None, room2,None,None)\r\n room3F.setDirections(hallF, None, room3L, room3R)\r\n room3L.setDirections(None, None, room3B, room3F)\r\n room3B.setDirections(None, None, room3R, room3L)\r\n room3R.setDirections(None, None, room3F, room3B)\r\n room4.setDirections(None,hallF,None,None)\r\n\r\n redR.setDirections(None, None, None, None)\r\n blueR.setDirections(None, None, None, None)\r\n blackR.setDirections(None, None, None, None)\r\n \r\n #set up current location\r\n currentLocation = room1B\r\n currentLocation.todraw(win)\r\n\r\n #command\r\n# command = \"\"\r\n command = \"wakeUp\"\r\n \r\n #START LOOP\r\n while win.isClosed() == False:\r\n #--------------EXECUTE COMMAND-----------------\r\n if command != \"\":\r\n if command == \"wakeUp\" and currentLocation == room1B:\r\n shelf.updateText(win, \"I woke up\")\r\n shelf.updateText(win, \"I don't know where I am.\")\r\n shelf.updateText(win, \"I don't remember anything...Ahh...Headache!\")\r\n shelf.updateText(win, \"I'd better start searching around (click again to continue)\")\r\n command = \"\"\r\n \r\n elif command == \"go2Hall\" and currentLocation == room1F:\r\n currentLocation.undraw()\r\n currentLocation = hallF\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n\r\n elif command == \"go2Hall\" and currentLocation == room3F:\r\n currentLocation.undraw()\r\n currentLocation = hallF\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n \r\n elif command == \"go2HallR\":\r\n currentLocation.undraw()\r\n currentLocation = hallR\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n \r\n elif command == \"go2HallL\":\r\n currentLocation.undraw()\r\n currentLocation = hallL\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n\r\n elif command == \"go2HallZ\":\r\n currentLocation.undraw()\r\n currentLocation = hallZ\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n\r\n elif command == \"go2Room2\" and currentLocation == hallR:\r\n currentLocation.imgOpenDoor.draw(win)\r\n time.sleep(.5)\r\n currentLocation.undraw()\r\n currentLocation.imgOpenDoor.undraw()\r\n currentLocation = room2\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n\r\n elif command == \"go2Room3\" and currentLocation == hallL:\r\n currentLocation.imgOpenDoor.draw(win)\r\n time.sleep(.5)\r\n currentLocation.undraw()\r\n currentLocation.imgOpenDoor.undraw()\r\n currentLocation = room3B\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n\r\n elif command == \"go2Room4\":\r\n currentLocation.undraw()\r\n currentLocation = room4\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n\r\n elif command == \"go2Room2Z\":\r\n currentLocation.undraw()\r\n currentLocation = room2Z\r\n currentLocation.todraw(win)\r\n command = \"\"\r\n\r\n \r\n elif command == \"go2Room1Rm\":\r\n currentLocation.undraw()\r\n currentLocation = room1Rm\r\n currentLocation.todraw(win)\r\n p = win.getMouse()\r\n shelf.updateText(win, \"Who is this man? Me??\")\r\n shelf.updateText(win, \"I cannot believe I can't even recognize myself. What happend?\")\r\n \r\n if shelf.allObjects[2] in shelf.checkin:\r\n if shelf.decisionText(win, \"Somehow I just hate to see this face...Use the cup to breake it? 
\") == \"Y\":\r\n shelf.allObjects[2].undraw()\r\n shelf.checkin[shelf.checkin.index(shelf.allObjects[2])] = False\r\n room1R.imgMirror = Image(Point(5, 3.5), \"mirrorSsh.gif\")\r\n currentLocation.undraw()\r\n room1Rm.imgMirrorL = Image(Point(4, 3), \"mirrorLsh.gif\")\r\n currentLocation = room1R\r\n currentLocation.todraw(win)\r\n room1L.noNeed2Draw.append(room1L.allObjects[2])\r\n \r\n elif shelf.allObjects[3] in shelf.checkin:\r\n if shelf.decisionText(win, \"Somehow I just hate to see this face...Use the cup to breake it?\") == \"Y\":\r\n shelf.allObjects[3].undraw()\r\n shelf.checkin[shelf.checkin.index(shelf.allObjects[3])] = False\r\n room1R.imgMirror = Image(Point(5, 3.5), \"mirrorSsh.gif\")\r\n currentLocation.undraw()\r\n room1Rm.imgMirrorL = Image(Point(4, 3), \"mirrorLsh.gif\")\r\n currentLocation = room1R\r\n currentLocation.todraw(win)\r\n \r\n #cup is used\r\n elif room1L.allObjects[2] in room1L.noNeed2Draw and shelf.allObjects[2] not in shelf.checkin:\r\n #emptyCup is not present\r\n if shelf.allObjects[3] not in shelf.checkin:\r\n #doc1 Letter +Dear... Don't worry you'll live\r\n shelf.allObjects[7].draw(win)\r\n shelf.updateText(win, \"A piece of paper hides behind the mirror...Let's read it.\")\r\n shelf.updateText(win, \"'I know that I won't have much time.'\")\r\n shelf.updateText(win, \"'They all cheat me that I'm gonna recover...Even Jane says so!'\")\r\n shelf.updateText(win, \"'I know my body...It's gonna collapse.'\")\r\n shelf.updateText(win, \"What does this mean??\")\r\n shelf.allObjects[7].undraw()\r\n currentLocation.undraw()\r\n currentLocation = room1R\r\n currentLocation.todraw(win)\r\n\r\n command = \"\"\r\n\r\n elif command == \"stimulant\" and currentLocation == room2Z:\r\n shelf.allObjects[0].undraw()\r\n shelf.checkin[shelf.checkin.index(shelf.allObjects[0])] = False\r\n room1L.noNeed2Draw.append(room1L.allObjects[0])\r\n #redpill\r\n currentLocation.allObjects[9].draw(win)\r\n currentLocation.noNeed2Draw.remove(currentLocation.allObjects[9])\r\n if currentLocation.allObjects[5] in currentLocation.noNeed2Draw and shelf.allObjects[5] not in shelf.checkin:\r\n if room2Z.allObjects[9] not in room2Z.noNeed2Draw and room2Z.allObjects[10] not in room2Z.noNeed2Draw:\r\n currentLocation.noNeed2Draw.remove(currentLocation.allObjects[5])\r\n currentLocation.undraw()\r\n currentLocation = room2\r\n currentLocation.todraw(win)\r\n shelf.updateText(win, \"Sound in the cabinet!\")\r\n command = \"\"\r\n\r\n elif command == \"tranquilizer\" and currentLocation == room2Z:\r\n shelf.allObjects[1].undraw()\r\n shelf.checkin[shelf.checkin.index(shelf.allObjects[1])] = False\r\n room1L.noNeed2Draw.append(room2.allObjects[1])\r\n #redpill\r\n currentLocation.allObjects[10].draw(win)\r\n currentLocation.noNeed2Draw.remove(currentLocation.allObjects[10])\r\n if currentLocation.allObjects[5] in currentLocation.noNeed2Draw and shelf.allObjects[5] not in shelf.checkin:\r\n if room2Z.allObjects[9] not in room2Z.noNeed2Draw and room2Z.allObjects[10] not in room2Z.noNeed2Draw:\r\n currentLocation.noNeed2Draw.remove(currentLocation.allObjects[5])\r\n currentLocation.undraw()\r\n currentLocation = room2\r\n currentLocation.todraw(win)\r\n shelf.updateText(win, \"Sound in the cabinet!\")\r\n command = \"\"\r\n \r\n elif command == \"spill\" and currentLocation == room1B:\r\n #remove cup\r\n shelf.allObjects[2].undraw()\r\n shelf.checkin[shelf.checkin.index(shelf.allObjects[2])] = False\r\n room1L.noNeed2Draw.append(room1L.allObjects[2])\r\n #draw empty cup\r\n 
shelf.allObjects[3].draw(win)\r\n                shelf.place(shelf.allObjects[3])\r\n                currentLocation.spill(win)\r\n                command = \"\"\r\n\r\n            #ENDINGS+blackouts\r\n            elif command == \"end1\":\r\n                currentLocation.undraw()\r\n                currentLocation = blackR\r\n                currentLocation.todraw(win)\r\n                shelf.updateText(win, \"A woman picked up the call, and claimed to be my wife, Jane.\")\r\n                shelf.updateText(win, \"I couldn't refute her because she showed me my photo ID.\")\r\n                shelf.updateText(win, \"She told me that there was a car crash which caused my memory loss.\")\r\n                shelf.updateText(win, \"I trusted her.\")\r\n                shelf.updateText(win, \"NORMAL END\")\r\n                command = \"finishGame\"\r\n            \r\n            elif command == \"end2\":\r\n                currentLocation.undraw()\r\n                currentLocation = blackR\r\n                currentLocation.todraw(win)\r\n                shelf.updateText(win, \"I called Mary, and she cried when hearing my voice...\")\r\n                shelf.updateText(win, \"She said my disappearance devastated her, and she thought I might be dead.\")\r\n                shelf.updateText(win, \"I promised her I would be back.\")\r\n                shelf.updateText(win, \"NORMAL END\")\r\n                command = \"finishGame\"\r\n\r\n            elif command == \"trueEnd\":\r\n                currentLocation.undraw()\r\n                currentLocation = blueR\r\n                currentLocation.todraw(win)\r\n                shelf.updateText(win, \"'I' survived in this body as 'experiment model,' but lost memory.\")\r\n                shelf.updateText(win, \"'My body's' brain was preserved in that jar...'Jason' is dead.\")\r\n                shelf.updateText(win, \"I would continue to use this body, and start my new life.\")\r\n                shelf.updateText(win, \"TRUE END\")\r\n                command = \"finishGame\"\r\n\r\n            elif command == \"deadEnd\":\r\n                currentLocation.undraw()\r\n                currentLocation = redR\r\n                currentLocation.todraw(win)\r\n                shelf.updateText(win, \"DEAD END\")\r\n                command = \"finishGame\"\r\n\r\n            elif command == \"finishGame\":\r\n                currentLocation.undraw()\r\n                currentLocation = blueR\r\n                currentLocation.todraw(win)\r\n                shelf.updateText(win, \"Thanks for playing!\")\r\n                win.close()\r\n            \r\n            else:\r\n                command = \"\"\r\n        \r\n        \r\n            \r\n        #--------------GET MOUSE------------------------\r\n        else:\r\n            p = win.getMouse() \r\n            #quit game\r\n            if exit.contains(p) == True:\r\n                exit.press()\r\n                win.close()\r\n            \r\n            #move between rooms\r\n            if uButton.contains(p) == True:\r\n                uButton.press()\r\n                if currentLocation.up != None:\r\n                    currentLocation.undraw()\r\n                    currentLocation = currentLocation.up\r\n                    currentLocation.todraw(win)\r\n                    \r\n            elif lButton.contains(p) == True:\r\n                lButton.press()\r\n                if currentLocation.left != None:\r\n                    currentLocation.undraw()\r\n                    currentLocation = currentLocation.left\r\n                    currentLocation.todraw(win)\r\n                    \r\n            elif dButton.contains(p) == True:\r\n                dButton.press()\r\n                if currentLocation.down != None:\r\n                    currentLocation.undraw()\r\n                    currentLocation = currentLocation.down\r\n                    currentLocation.todraw(win)\r\n                    \r\n            elif rButton.contains(p) == True:\r\n                rButton.press()\r\n                if currentLocation.right != None:\r\n                    currentLocation.undraw()\r\n                    currentLocation = currentLocation.right\r\n                    currentLocation.todraw(win)\r\n                    \r\n            else:\r\n                #-----------CONDITION TRIGGER\r\n\r\n                #go to hallF\r\n                if currentLocation == room3F and enterR(p,1,3,1,4) == True:\r\n                    command = \"go2Hall\"\r\n\r\n                #go hallR\r\n                if currentLocation == hallF and enterR(p,6,7,2,4) == True:\r\n                    command = \"go2HallR\"\r\n                #go hallL\r\n                if currentLocation == hallF and enterR(p,1,2,2,4) == True:\r\n                    command = \"go2HallL\"\r\n                #go hallZ\r\n                if currentLocation == hallF and enterR(p,3,5,3,6) == True:\r\n                    command = \"go2HallZ\"\r\n                \r\n                #go open the door in hallR enter room2\r\n                if 
currentLocation == hallR and enterR(p,2.5,5.5,0,6) == True:\r\n command = \"go2Room2\"\r\n\r\n #go to room2Z\r\n if currentLocation == room2 and enterR(p,3.8,5.2,4.3,5.5) == True:\r\n command = \"go2Room2Z\"\r\n\r\n #go to room4\r\n if currentLocation == hallZ:\r\n if currentLocation.key3.contains(p)==True:\r\n currentLocation.key3.press()\r\n currentLocation.boxText.setText(\"----\")\r\n p = win.getMouse()\r\n if currentLocation.key5.contains(p)==True:\r\n currentLocation.key5.press()\r\n p = win.getMouse()\r\n if currentLocation.key2.contains(p)==True:\r\n currentLocation.key2.press()\r\n p = win.getMouse()\r\n if currentLocation.key1.contains(p)==True:\r\n currentLocation.key1.press()\r\n currentLocation.boxText.setText(\"PASS\")\r\n time.sleep(.2)\r\n command = \"go2Room4\"\r\n elif currentLocation.key1.contains(p)==True:\r\n currentLocation.key1.press()\r\n currentLocation.boxText.setText(\"----\")\r\n elif currentLocation.key2.contains(p)==True:\r\n currentLocation.key2.press()\r\n currentLocation.boxText.setText(\"----\") \r\n elif currentLocation.key4.contains(p)==True:\r\n currentLocation.key4.press()\r\n currentLocation.boxText.setText(\"----\")\r\n elif currentLocation.key5.contains(p)==True:\r\n currentLocation.key5.press()\r\n currentLocation.boxText.setText(\"----\")\r\n elif currentLocation.key6.contains(p)==True:\r\n currentLocation.key6.press()\r\n currentLocation.boxText.setText(\"----\") \r\n elif currentLocation.key7.contains(p)==True:\r\n currentLocation.key7.press()\r\n currentLocation.boxText.setText(\"----\")\r\n elif currentLocation.key8.contains(p)==True:\r\n currentLocation.key8.press()\r\n currentLocation.boxText.setText(\"----\")\r\n elif currentLocation.key9.contains(p)==True:\r\n currentLocation.key9.press()\r\n currentLocation.boxText.setText(\"----\") \r\n \r\n\r\n #telephone\r\n if currentLocation == room2 and enterR(p, 4.2, 4.8, 2.1, 2.5) == True:\r\n if shelf.allObjects[8] in shelf.noNeed2Draw:\r\n if shelf.decisionText(win, \"Call the relative's number on the medical paper?\") == \"Y\":\r\n command = \"end1\"\r\n else:\r\n shelf.updateText(win, \"A telephone.\")\r\n\r\n #Key2R2 mirror trigger + make key appear\r\n if currentLocation == room1R and enterR(p, 4, 6, 2.75, 4.5) == True:\r\n command = \"go2Room1Rm\"\r\n #key is not present\r\n if currentLocation.allObjects[4] in currentLocation.noNeed2Draw and shelf.allObjects[4] not in shelf.checkin:\r\n #cup is used\r\n if room1L.allObjects[2] in room1L.noNeed2Draw and shelf.allObjects[2] not in shelf.checkin:\r\n #emptyCup is not present\r\n if shelf.allObjects[3] not in shelf.checkin:\r\n currentLocation.noNeed2Draw.remove(currentLocation.allObjects[4])\r\n\r\n\r\n #brain->trueEnd\r\n if currentLocation == room4 and enterR(p, 2.2, 2.8, 1.7, 2.7):\r\n if shelf.allObjects[12] in shelf.noNeed2Draw:\r\n shelf.allObjects[13].draw(win)\r\n shelf.updateText(win, \"......\")\r\n if shelf.decisionText(win,\"Is this my brain?\") == \"Y\":\r\n command = \"end2\"\r\n else:\r\n command = \"trueEnd\"\r\n else:\r\n shelf.updateText(win, \"A jar...of brain...\")\r\n\r\n #----------------COMMENTS\r\n #Locked door in room1\r\n if currentLocation == room1F and enterR(p,5,7,1,4) == True:\r\n if shelf.allObjects[4] in shelf.checkin:\r\n shelf.updateText(win, \"Use key to unlock\")\r\n else:\r\n shelf.updateText(win, \"Door is locked\")\r\n\r\n #Locked door in HallL\r\n if currentLocation == hallL and enterR(p,2.5,5.5,0,6) == True:\r\n if shelf.allObjects[5] in shelf.checkin:\r\n shelf.updateText(win, \"Use key to 
unlock\")\r\n else:\r\n shelf.updateText(win, \"Door is locked\")\r\n \r\n #patch\r\n if currentLocation == room1B and enterR(p, 4, 6, 4, 5) == True:\r\n if shelf.allObjects[2] in shelf.checkin:\r\n shelf.updateText(win, \"powder on the wall seems like can be dissolved...\")\r\n elif shelf.allObjects[2] in room1L.noNeed2Draw:\r\n shelf.updateText(win, \"What does that mean?\")\r\n else:\r\n shelf.updateText(win, \"Why is here colored? It feels soft...Oops, some dregs fall.\")\r\n\r\n #bed\r\n if currentLocation == room1B and enterR(p, .5, 6.5, 0, 1.59) == True:\r\n shelf.updateText(win, \"Where I woke up. This bed looks very new.\")\r\n\r\n #sofa\r\n if currentLocation == room3R and enterR(p, .5, 6, 0, 3) == True:\r\n shelf.updateText(win, \"Sofa...a good place to rest.\")\r\n\r\n\r\n #table\r\n if currentLocation == room1L and enterR(p, .5, 4.5, 0, 2.39) == True:\r\n shelf.updateText(win, \"A table. Nothing special.\")\r\n\r\n #doc2 medical history +th patient +weak,legs cannot walk +requires stimulant to keep awake\r\n if currentLocation == room2 and enterR(p,2.4,4.2,1,2) == True:\r\n shelf.allObjects[8].draw(win)\r\n shelf.updateText(win, \"A sheet of medical history...the photo is absent\")\r\n shelf.updateText(win, \"'Leonard...28...necrotic leg ulcer...loss of walking ability'\")\r\n shelf.updateText(win, \"'Chronic dehydration...Constant depression'\")\r\n shelf.updateText(win,\"'...requires stimulant to keep consciousness.'\")\r\n shelf.updateText(win, \"'All conditions fulfilled...Consider to be 5th case.'\")\r\n shelf.updateText(win, \"Why is the photo not here? Am I the patient...?\")\r\n shelf.updateText(win, \"It can't be me...My legs are perfectly fine!...What does 5th case mean...?\")\r\n shelf.allObjects[8].undraw()\r\n shelf.noNeed2Draw.append(shelf.allObjects[8])\r\n\r\n #doc3 diary +Mary+athletics identity NO.1\r\n if currentLocation == room3B and enterR(p,2,6,.5,2.4) == True:\r\n if currentLocation.imgFam in currentLocation.noNeed2Draw:\r\n shelf.allObjects[11].draw(win)\r\n shelf.updateText(win, \"Find a diary!\")\r\n shelf.updateText(win, \"'...Jason.S.Siemon, 2nd place in this track& field meet.'\")\r\n shelf.updateText(win,\"' Mary said she was proud of me. Won't lose again!'\")\r\n shelf.updateText(win, \"'I need to find some places for special training next month...'\") \r\n shelf.updateText(win, \"Doesn't feel like me...\")\r\n shelf.updateText(win,\"but according to the picture, is my name Jason? Am I the one wrote this diary?\")\r\n shelf.allObjects[11].undraw()\r\n shelf.noNeed2Draw.append(shelf.allObjects[11])\r\n else:\r\n shelf.updateText(win, \"A cabinet, but something else catches my attention...\")\r\n\r\n #doc4 labs +brain exchange\r\n if currentLocation == room4 and enterR(p,2.7,4.8,.3,2) == True:\r\n shelf.allObjects[12].draw(win)\r\n shelf.updateText(win, \"Find a pile of documents! 
It has a photo...very familiar...\")\r\n                    shelf.updateText(win, \"'Case 5 (L&J): serious rejections happened after implantation'\")\r\n                    shelf.updateText(win, \"'major surgery revision needed.'\")\r\n                    shelf.updateText(win, \"'...The body passed danger period'\")\r\n                    shelf.updateText(win, \"'Observation report to be continued...report from AACCGTTGATCCGCT BASE'\")\r\n                    shelf.updateText(win, \"...So my real identity is...\")\r\n                    shelf.allObjects[12].undraw()\r\n                    shelf.noNeed2Draw.append(shelf.allObjects[12])\r\n\r\n\r\n\r\n\r\n\r\n                #doll...\r\n                if currentLocation == room2Z and enterR(p, 0, 2, 0, 6) == True:\r\n                    shelf.updateText(win, \"This face seems manic\")\r\n                if currentLocation == room2Z and enterR(p, 6, 8, 0, 6) == True:\r\n                    shelf.updateText(win, \"This face seems sad\")\r\n\r\n                #family\r\n                if currentLocation == room3B and enterR(p, 2.5, 3.5, 2.5, 3.5) == True:\r\n                    shelf.updateText(win, \"A family picture. Seems like this man is with his wife and 2 children.\")\r\n                    shelf.updateText(win, \"The man looks exactly like my reflection in the mirror...\")\r\n                    shelf.updateText(win,\"But I have no idea about the other 3 people in the picture.\")\r\n                    #only as a condition trigger/not affecting drawing\r\n                    currentLocation.noNeed2Draw.append(currentLocation.imgFam)\r\n                \r\n                #-------------OBJECT PICK UP\r\n                obj = currentLocation.getObject(p)\r\n                #stimulant\r\n                if obj == currentLocation.allObjects[0]:\r\n                    shelf.allObjects[0] = obj\r\n                    if obj not in currentLocation.noNeed2Draw:\r\n                        shelf.updateText(win, \"Got stimulant. What am I supposed to do? I don't feel like taking it.\")\r\n                        shelf.place(obj)\r\n                        currentLocation.noNeed2Draw.append(obj)\r\n                #tranquilizer\r\n                elif obj == currentLocation.allObjects[1]:\r\n                    shelf.allObjects[1] = obj\r\n                    if obj not in currentLocation.noNeed2Draw:\r\n                        shelf.updateText(win, \"Got tranquilizer. What am I supposed to do? I don't feel like taking it.\")\r\n                        shelf.place(obj)\r\n                        currentLocation.noNeed2Draw.append(obj)\r\n                #cup\r\n                elif obj == currentLocation.allObjects[2]:\r\n                    shelf.allObjects[2] = obj\r\n                    if obj not in currentLocation.noNeed2Draw:\r\n                        shelf.updateText(win, \"Got cup. 
Ew, the coffee-like liquid stinks.\")\r\n shelf.place(obj)\r\n currentLocation.noNeed2Draw.append(obj)\r\n #key2H\r\n elif obj == currentLocation.allObjects[4]:\r\n shelf.allObjects[4] = obj\r\n if obj not in currentLocation.noNeed2Draw:\r\n shelf.updateText(win, \"A key is hinding behind...\")\r\n shelf.place(obj)\r\n currentLocation.noNeed2Draw.append(obj)\r\n #key2R3\r\n elif obj == currentLocation.allObjects[5]:\r\n shelf.allObjects[5] = obj\r\n if obj not in currentLocation.noNeed2Draw:\r\n shelf.updateText(win, \"Got the key!\")\r\n shelf.place(obj)\r\n currentLocation.noNeed2Draw.append(obj)\r\n #cellphone\r\n elif obj == currentLocation.allObjects[6]:\r\n shelf.allObjects[6] = obj\r\n if obj not in currentLocation.noNeed2Draw:\r\n shelf.updateText(win, \"Someone's phone...still powers on.\")\r\n shelf.place(obj)\r\n currentLocation.noNeed2Draw.append(obj)\r\n else:\r\n #----------------PICK ITEMS FROM GRID\r\n #press button->to use:check obj(match)/loc/next getMouse\r\n if item1.contains(p)==True:\r\n item1.press()\r\n command = shelf.match(win,0)\r\n elif item2.contains(p)==True:\r\n item2.press()\r\n command = shelf.match(win,1)\r\n elif item3.contains(p)==True:\r\n item3.press()\r\n command = shelf.match(win,2)\r\n elif item4.contains(p)==True:\r\n item4.press()\r\n command = shelf.match(win,3)\r\n elif item5.contains(p)==True:\r\n item5.press()\r\n command = shelf.match(win,4)\r\n elif item6.contains(p)==True:\r\n item6.press()\r\n command = shelf.match(win,5)\r\n elif item7.contains(p)==True:\r\n item7.press()\r\n command = shelf.match(win,6)\r\n elif item8.contains(p)==True:\r\n item8.press()\r\n command = shelf.match(win,7)\r\n elif item9.contains(p)==True:\r\n item9.press()\r\n command = shelf.match(win,8)\r\n\r\n\r\n \r\n #update location in the indicator\r\n display.setText(currentLocation)\r\n\r\n\r\ngame()","repo_name":"garnetliu/conferenceProj_2013","sub_path":"conference.final.py","file_name":"conference.final.py","file_ext":"py","file_size_in_byte":58861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72222607626","text":"from dash import Dash, dash_table, dcc, html\nfrom dash.dependencies import Input, Output, State\nimport sys, os\nfrom pymatgen.analysis.wulff import WulffShape\nfrom pymatgen.core.structure import Lattice\nfrom pymatgen.ext.matproj import MPRester\nmpr = MPRester('mlcC4gtXFVqN9WLv')\n\nfrom scipy.spatial.qhull import QhullError\n\napp = Dash(__name__)\nserver = app.server\n\napp.layout = html.Div([\n\n # make a table for the abc lattice parameters\n dash_table.DataTable(\n id='abc',\n columns=[{'name': 'a', 'id': 'a',\n 'deletable': False, 'renamable': False},\n {'name': 'b', 'id': 'b',\n 'deletable': False, 'renamable': False},\n {'name': 'c', 'id': 'c',\n 'deletable': False, 'renamable': False},\n ],\n data=[{'a': 1, 'b': 1, 'c': 1}],\n editable=True,\n row_deletable=False,\n style_cell={\"textAlign\": \"center\", 'minWidth': '100px'}, \n fill_width=False\n ),\n \n # make a table for the angles lattice parameters\n dash_table.DataTable(\n id='angles',\n columns=[{'name': 'alpha', 'id': 'alpha',\n 'deletable': False, 'renamable': False},\n {'name': 'beta', 'id': 'beta',\n 'deletable': False, 'renamable': False},\n {'name': 'gamma', 'id': 'gamma',\n 'deletable': False, 'renamable': False},\n ],\n data=[{'alpha': 90, 'beta': 90, 'gamma': 90}],\n editable=True,\n row_deletable=False,\n style_cell={ \"textAlign\": \"center\", 'minWidth': '100px'}, \n fill_width=False\n ),\n\n # make a 
table for the miller index facets and surface energy\n dash_table.DataTable(\n id='hkl_and_surface_energy',\n columns=[{'name': 'h', 'id': 'h', 'deletable': False, 'renamable': False},\n {'name': 'k', 'id': 'k', 'deletable': False, 'renamable': False},\n {'name': 'l', 'id': 'l', 'deletable': False, 'renamable': False},\n {'name': 'Surface energy', 'id': 'surface_energy', \n 'deletable': False, 'renamable': False},\n {'name': 'Area fraction', 'id': 'area_frac', \n 'deletable': False, 'renamable': False}],\n data=[{'h': 1, 'k': 0, 'l': 0, 'surface_energy': 1, 'area_frac': 1}],\n editable=True,\n row_deletable=True,\n style_cell={\"textAlign\": \"center\", 'minWidth': '100px'},\n fill_width=False\n ),\n\n # add a button for adding more facets\n html.Button('Add surface', id='editing-rows-button', n_clicks=0),\n # add a box for inputting specific mpid\n dcc.Input(id=\"MPID\", type=\"text\", placeholder=\"MPID\", style={'marginRight':'10px'}, debounce=True),\n dcc.Graph(id='wulff_shape'),\n \n dcc.Upload(id='slab_vrun',\n children=html.Div(['Slab ', html.A('vasprun.xml file')]),\n style={'width': '100%', 'height': '60px', 'lineHeight': '60px',\n 'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '5px',\n 'textAlign': 'center', 'margin': '10px'}),\n dcc.Upload(id='bulk_vrun',\n children=html.Div(['Bulk ', html.A('vasprun.xml file')]),\n style={'width': '100%', 'height': '60px', 'lineHeight': '60px',\n 'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '5px',\n 'textAlign': 'center', 'margin': '10px'}),\n # make a table for the miller index facets for uploaded xml files\n dash_table.DataTable(id='hkl_xml', columns=[{'name': 'h', 'id': 'h', 'deletable': False, 'renamable': False},\n {'name': 'k', 'id': 'k', 'deletable': False, 'renamable': False},\n {'name': 'l', 'id': 'l', 'deletable': False, 'renamable': False}],\n data=[{'h': 1, 'k': 0, 'l': 0}], editable=True, row_deletable=False, \n style_cell={\"textAlign\": \"center\", 'minWidth': '100px'}, fill_width=False),\n html.Button('Calculate surface energy', id='calculate_button', n_clicks=0),\n])\n\n@app.callback(\n Output('wulff_shape', 'figure'),\n Output('hkl_and_surface_energy', 'data'),\n Output('abc', 'data'),\n Output('angles', 'data'),\n Output('MPID', 'value'), # returns nothing in order to clear input box\n Output('editing-rows-button', 'n_clicks'), # resets n_clicks to 0 to avoid creating new rows for each input\n Output('calculate_button','disabled'),\n Output('calculate_button', 'n_clicks'), # resets n_clicks to 0 to avoid creating new rows for each input\n\n Input('hkl_and_surface_energy', 'data'),\n Input('abc', 'data'),\n Input('angles', 'data'),\n Input('wulff_shape', 'figure'),\n Input(\"MPID\", \"value\"),\n Input('editing-rows-button', 'n_clicks'),\n Input('calculate_button', 'n_clicks'),\n Input('slab_vrun', 'contents'),\n Input('slab_vrun', 'filename'),\n Input('bulk_vrun', 'contents'),\n Input('bulk_vrun', 'filename'),\n Input('hkl_xml', 'data'))\ndef display_wulff_shape(hkl_and_se, abc, angles, old_wulff_shape, \n mpid=None, n_clicks=0, calculate=0, slab_vrun=None, slab_filename=None,\n bulk_vrun=None, bulk_filename=None, hkl_xml=None):\n \n columns=[{'name': 'h', 'id': 'h', 'deletable': False, 'renamable': False},\n {'name': 'k', 'id': 'k', 'deletable': False, 'renamable': False},\n {'name': 'l', 'id': 'l', 'deletable': False, 'renamable': False},\n {'name': 'Surface energy', 'id': 'surface_energy', \n 'deletable': False, 'renamable': False},\n {'name': 'Area fraction', 'id': 'area_frac', \n 
'deletable': False, 'renamable': False}] \n    \n    # local imports for the vasprun.xml upload path; these names are used below\n    # but are not imported at module level\n    import base64\n    from tempfile import NamedTemporaryFile\n    from pymatgen.io.vasp.outputs import Vasprun\n    from pymatgen.analysis.surface_analysis import SlabEntry, EV_PER_ANG2_TO_JOULES_PER_M2\n    from pymatgen.entries.computed_entries import ComputedStructureEntry\n    \n    if slab_vrun:\n        content_type, content_string = slab_vrun.split(',')\n        decoded = base64.b64decode(content_string)\n        with NamedTemporaryFile(suffix=slab_filename) as tmp:\n            tmp.write(decoded)\n            tmp.flush()\n            slab_vrun = Vasprun(tmp.name)\n            slab_energy = slab_vrun.final_energy\n            slab = slab_vrun.final_structure\n    if bulk_vrun:\n        content_type, content_string = bulk_vrun.split(',')\n        decoded = base64.b64decode(content_string)\n        with NamedTemporaryFile(suffix=bulk_filename) as tmp:\n            tmp.write(decoded)\n            tmp.flush()\n            bulk_vrun = Vasprun(tmp.name)\n            bulk_energy = bulk_vrun.final_energy\n            bulk = bulk_vrun.final_structure\n    if calculate > 0:\n        hkl = (int(hkl_xml[0]['h']), int(hkl_xml[0]['k']), int(hkl_xml[0]['l']))\n        slabentry = SlabEntry(slab, slab_energy, hkl)\n        calc_surface_energy = slabentry.surface_energy(ComputedStructureEntry(bulk, bulk_energy))\n        hkl_and_se.append({'h': hkl[0], 'k': hkl[1], 'l': hkl[-1],\n                           'surface_energy': '%.3f' %(calc_surface_energy*EV_PER_ANG2_TO_JOULES_PER_M2)})\n\n    if n_clicks > 0:\n        hkl_and_se.append({c['id']: '' for c in columns if c['id'] != 'area_frac'})\n    if mpid:\n        surface_data = mpr.get_surface_data(mpid)\n        miller_indices = [tuple(surf['miller_index']) for surf in surface_data['surfaces']]\n        surface_energies = [surf['surface_energy'] for surf in surface_data['surfaces']]\n        \n        # reset lattice parameter table for this particular mpid\n        latt = mpr.get_structure_by_material_id(mpid, conventional_unit_cell=True).lattice \n        abc = [{'a': latt.a, 'b': latt.b, 'c': latt.c}]\n        angles = [{'alpha': latt.alpha, 'beta': latt.beta, 'gamma': latt.gamma}]\n\n        # reset the table for this particular mpid\n        hkl_and_se=[]\n        for i, hkl in enumerate(miller_indices):\n            hkl_and_se.append({'h': hkl[0], 'k': hkl[1], 'l': hkl[-1], 'surface_energy': '%.3f' %(surface_energies[i])})\n        \n    else:\n        # only consider rows with appropriate values for h, k, l and surface energy, ignore otherwise \n        miller_indices = [(int(row['h']), int(row['k']), int(row['l'])) for row in hkl_and_se \n                          if all([v != '' and v != None for v in row.values()])]\n        surface_energies = [float(row['surface_energy']) for row in hkl_and_se \n                            if all([v != '' and v != None for v in row.values()])]\n    latt = Lattice.from_parameters(float(abc[0]['a']), float(abc[0]['b']), float(abc[0]['c']), \n                                   float(angles[0]['alpha']), float(angles[0]['beta']), float(angles[0]['gamma']))\n    \n    slab_vrun = None if calculate > 0 else slab_vrun\n    bulk_vrun = None if calculate > 0 else bulk_vrun\n    \n    try:\n        wulff = WulffShape(latt, miller_indices, surface_energies)\n        # add the area fractions\n        for i, row in enumerate(hkl_and_se):\n            if all([v != '' and v != None for v in row.values()]):\n                hkl_and_se[i]['area_frac'] = '%.3f' %(wulff.area_fraction_dict[tuple([int(row['h']), \n                                                                                     int(row['k']), \n                                                                                     int(row['l'])])])\n        return wulff.get_plotly(), hkl_and_se, abc, angles, '', 0, slab_vrun == None or bulk_vrun == None, 0\n    except QhullError:\n        # If a Wulff shape cannot be enclosed, return the previous Wulff shape\n        return old_wulff_shape, hkl_and_se, abc, angles, '', 0, slab_vrun == None or bulk_vrun == None, 0\n    \nif __name__ == '__main__':\n    app.run_server(debug=True)\n","repo_name":"CifLord/lykoi","sub_path":"wulff_builder.py","file_name":"wulff_builder.py","file_ext":"py","file_size_in_byte":9600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73106918346","text":"###############################################################\n#\n# Job options file 
for Evgen MadGraph Pythia8 Monopole Generation\n# A. Lionti, 2015-09-22\n#==============================================================\n\n#--------------------------------------------------------------\n# MadGraph on the fly\n#--------------------------------------------------------------\nfrom MadGraphControl.MadGraphUtils import *\n\nif hasattr(runArgs,'ecmEnergy'):\n beamEnergy = runArgs.ecmEnergy / 2.\nelse:\n beamEnergy = 6500.\n\nsafefactor=1.1 #generate extra 10% events in case any fail showering\nnevents = 5000*safefactor\nif runArgs.maxEvents > 0: nevents = runArgs.maxEvents*safefactor\n\n\nmasses = {}\nmasses['4110000'] = float(mass)\n\n\n# writing proc card for MG\nfcard = open('proc_card_mg5.dat','w')\nfcard.write(\"\"\"\nimport model mono_spinhalf\ngenerate p p > mm+ mm-\noutput -f\n\"\"\")\nfcard.close()\n\n#Process_dir\nprocess_dir = new_process()\n\n#create the param_card\nif os.access('param_card.dat',os.R_OK):\n print(\"Deleting old param_card.dat\")\n os.remove('param_card.dat')\nparam_card = \"%s/Cards/param_card.dat\" % process_dir\n\t\nparam_card_extras={'GCH':{'GCH':'%s' % gcharge} }\n\nif( build_param_card(param_card_old=param_card,param_card_new='param_card.dat',masses=masses,params=param_card_extras) == -1):\n raise RuntimeError(\"Could not create param_card.dat\")\n\t\n#create the run card:i\nrun_card_extras = {'lhe_version':'1.0' , 'ptheavy':'%s' % ptcut}\n\nif os.access('run_card.dat',os.R_OK):\n print(\"Deleting old run_card.dat\")\n os.remove('run_card.dat')\nrun_card = get_default_runcard(proc_dir=process_dir)\nif build_run_card(run_card_old=run_card,run_card_new='run_card.dat',nevts=nevents,rand_seed=runArgs.randomSeed,beamEnergy=beamEnergy,extras=run_card_extras)==-1:\n raise RuntimeError(\"Could not create run_card.dat\")\n\n\n# generating events in MG\ngenerate(run_card_loc='run_card.dat',param_card_loc='param_card.dat',mode=0,njobs=1,run_name='Test',proc_dir=process_dir)\n\nstringy = 'madgraph.'+str(runArgs.runNumber)+'.MadGraphPythia8EvtGen_A14NNPDF23LO_DYMonopole'\n\t\narrange_output(run_name='Test',proc_dir=process_dir,outputDS=stringy+'._00001.events.tar.gz')\n\nevgenConfig.inputfilecheck = stringy\t\t\nrunArgs.inputGeneratorFile=stringy+'._00001.events.tar.gz'\n\n\n#--------------------------------------------------------------\n# General MC15 configuration\n#--------------------------------------------------------------\ninclude( \"MC15JobOptions/Pythia8_A14_NNPDF23LO_EvtGen_Common.py\" )\ninclude( \"MC15JobOptions/Pythia8_MadGraph.py\" )\n\n#--------------------------------------------------------------\n# Configuration for EvgenJobTransforms\n#--------------------------------------------------------------\nevgenConfig.description = \"Drell-Yan magnetic monopole generation for Mass=%s, Gcharge=%s with MadGraph+Pythia8 and the A14_NNPDF23LO tune in MC15\" % (mass,gcharge)\nevgenConfig.keywords = [\"exotic\", \"magneticMonopole\", \"drellYan\", \"BSM\"]\nevgenConfig.contact = [\"anlionti@cern.ch\"]\nevgenConfig.process = \"p p -> mm+ mm-\"\nevgenConfig.specialConfig = 'MASS=%s;GCHARGE=%s;preInclude=SimulationJobOptions/preInclude.Monopole.py' % (mass,gcharge)\n\n#--------------------------------------------------------------\n# Edit PDGTABLE.MeV with monopole mass\n#--------------------------------------------------------------\nALINE1=\"M 4110000 %s.E+03 +0.0E+00 -0.0E+00 Monopole 0\" % (mass)\nALINE2=\"W 4110000 0.E+00 +0.0E+00 -0.0E+00 Monopole 0\"\n\nimport os\nimport sys\n\npdgmod = os.path.isfile('PDGTABLE.MeV')\nif pdgmod is True:\n 
os.remove('PDGTABLE.MeV')\nos.system('get_files -data PDGTABLE.MeV')\nf=open('PDGTABLE.MeV','a')\nf.writelines(str(ALINE1))\nf.writelines('\\n')\nf.writelines(str(ALINE2))\nf.writelines('\\n')\nf.close()\n\ndel ALINE1\ndel ALINE2\n\n#--------------------------------------------------------------\n# Edit G4particle_whitelist.txt with monopole\n#--------------------------------------------------------------\n\nALINE1=\"4110000 mm %s.E+06 (Mev/c) lepton %s\" % (mass,gcharge)\nALINE2=\"-4110000 mmbar %s.E+06 (Mev/c) lepton -%s\" % (mass,gcharge)\n\nimport os\nimport sys\n\npdgmod = os.path.isfile('G4particle_whitelist.txt')\nif pdgmod is True:\n    os.remove('G4particle_whitelist.txt')\nos.system('get_files -data G4particle_whitelist.txt')\nf=open('G4particle_whitelist.txt','a')\nf.writelines(str(ALINE1))\nf.writelines('\\n')\nf.writelines(str(ALINE2))\nf.writelines('\\n')\nf.close()\n\ndel ALINE1\ndel ALINE2\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"btamadio/MadGraphProduction","sub_path":"MC15JobOptions/common/MadGraph/MadGraphControl_MGPy8EG_A14N23LO_DYMonopole.py","file_name":"MadGraphControl_MGPy8EG_A14N23LO_DYMonopole.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74311866505","text":"from functools import wraps\n\nfrom flask_jwt_extended import get_jwt\n\nfrom app.comun.excepciones import ExcepcionSinAutorizacion\n\nROLE_ADMIN = 'ROLE_ADMIN'\n\n\ndef admin_requerido(f):\n    @wraps(f)\n    def decorated_function(*args, **kws):\n        validar_role_permitidos([ROLE_ADMIN])\n        return f(*args, **kws)\n\n    return decorated_function\n\n\ndef validar_role_permitidos(roles_permitido):\n    roles_token = get_jwt()[\"authorities\"]\n    autorizado = False\n    for role_token in roles_token:\n        for role_permitido in roles_permitido:\n            if role_token == role_permitido:\n                autorizado = True\n\n    if not autorizado:\n        raise ExcepcionSinAutorizacion('No tiene permisos para consultar el recurso solicitado')\n","repo_name":"crisgicr/mesa-ayuda-backend","sub_path":"app/seguridad/decoradores.py","file_name":"decoradores.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25510132895","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 6 16:48:41 2022\r\n\r\n@author: fabic\r\n\r\n\"\"\"\r\n\r\n\r\n# Import required libraries:\r\nimport logging as log\r\nimport xlwt\r\nimport numpy as np\r\nimport scipy.io\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt \r\nfrom copy import deepcopy\r\nfrom tqdm import tqdm\r\nfrom scipy.interpolate import interp1d\r\nimport openpyxl\r\n\r\n\r\n# Read data into pandas dataframe:\r\nDF = pd.read_excel('shape-internal_snapshot_1670341103.xlsx')\r\nDF.head(5)\r\n\r\n# Define region classification\r\nRegions_I = ['Brazil','Canada','Central Europe','China region','Eastern Africa', #0-4\r\n 'India','Indonesia','Japan','Korea','Middle East',  #5-9\r\n 'Mexico','Northern Africa','Oceania','Rest of Central America','Rest of South Africa', #10-14\r\n 'Rest of South America','Rest of South Asia','Russia','South Africa','Southeast Asia', #15-19\r\n 'Kazakhstan region','Turkey','Ukraine region','USA','Western Africa', #20-24\r\n 'Western Europe'] #25\r\n    \r\nModel_I = ['IMAGE 3.3']\r\nRegions_I = [Model_I[0]+'|'+region_i for region_i in Regions_I]\r\n\r\nRegions_O = 
['World']\r\nRegions_R = ['R32CAN','R32CHN','R32EU12-M','R32IND','R32JPN','R32USA','France','Germany','Italy','Poland','Spain','UK','Oth_R32EU15','Oth_R32EU12-H','R5.2OECD_Other','R5.2REF_Other','R5.2ASIA_Other','R5.2MNF_Other','R5.2LAM_Other','R5.2SSA_Other']\r\n\r\nR_DictLookUp = {\r\n 'R32CAN': ['Canada'] ,\r\n 'R32CHN': ['China region'],\r\n 'R32EU12-M': ['Central Europe'],\r\n 'R32IND': ['India'],\r\n 'R32JPN': ['Japan'],\r\n 'R32USA': ['USA'],\r\n 'France': ['Western Europe'],\r\n 'Germany': ['Western Europe'],\r\n 'Italy': ['Western Europe'],\r\n 'Poland': ['Central Europe'],\r\n 'Spain': ['Western Europe'],\r\n 'UK': ['Western Europe'],\r\n 'Oth_R32EU15': ['Western Europe'],\r\n 'Oth_R32EU12-H': ['Central Europe'],\r\n 'R5.2OECD_Other': ['Oceania', 'Mexico','Korea','Turkey'],\r\n 'R5.2REF_Other': ['Russia','Kazakhstan region','Ukraine region'],\r\n 'R5.2ASIA_Other': ['Indonesia','Rest of South Asia','Southeast Asia'],\r\n 'R5.2MNF_Other': ['Middle East','Northern Africa'],\r\n 'R5.2LAM_Other': ['Brazil','Rest of Central America','Rest of South America'],\r\n 'R5.2SSA_Other': ['South Africa','Rest of South Africa','Western Africa']\r\n }\r\n\r\n#merge model and region name\r\nfor key in R_DictLookUp.keys():\r\n R_DictLookUp[key] = [Model_I[0]+'|'+ r for r in R_DictLookUp[key]]\r\n\r\n\r\nO_DictLookUp = {'World':['Brazil','Canada','Central Europe','China region','Eastern Africa', \r\n 'India','Indonesia','Japan','Korea','Middle East', \r\n 'Mexico','Northern Africa','Oceania','Rest of Central America','Rest of South Africa', \r\n 'Rest of South America','Rest of South Asia','Russia','South Africa','Southeast Asia', \r\n 'Kazakhstan region','Turkey','Ukraine region','USA','Western Africa' , \r\n 'Western Europe'] }\r\n\r\nfor key in O_DictLookUp.keys():\r\n O_DictLookUp[key] = [Model_I[0]+'|'+ r for r in O_DictLookUp[key]]\r\n \r\n\r\n\r\n# Define scenario classification\r\nScenario_I = ['SSP2','SDP_EI-1p5C','SDP_MC-1p5C','SDP_RC-1p5C']\r\nRCPScen_R = ['Baseline(unmitigated)','RCP2.6']\r\n\r\nS_DictLookUp = {\r\n 'Baseline(unmitigated)': ['SSP2'],\r\n 'RCP2.6': ['SDP_EI-1p5C']\r\n } \r\n\r\n# Define time classification\r\nYears_I = [2005,2010,2015,2020,2025,2030,2035,2040,2045,2050,2060,2070,2080,2090,2100]\r\nTime_R = np.arange(2005,2101,1)\r\nTimeL_R = [i for i in Time_R] \r\n\r\n\r\n# Define electricity genreation classification\r\nIndustry_I = np.array(['Secondary Energy|Electricity|Biomass|w/ CCS', #0\r\n 'Secondary Energy|Electricity|Biomass|w/o CCS', #1\r\n 'Secondary Energy|Electricity|Coal|w/ CCS', #2\r\n 'Secondary Energy|Electricity|Coal|w/o CCS', #3\r\n 'Secondary Energy|Electricity|Gas|w/ CCS', #4\r\n 'Secondary Energy|Electricity|Gas|w/o CCS', #5\r\n 'Secondary Energy|Electricity|Geothermal', #6\r\n 'Secondary Energy|Electricity|Hydro', #7\r\n 'Secondary Energy|Electricity|Nuclear', #8\r\n 'Secondary Energy|Electricity|Oil|w/ CCS', #9\r\n 'Secondary Energy|Electricity|Oil|w/o CCS', #10\r\n 'Secondary Energy|Electricity|Solar|CSP', #11\r\n 'Secondary Energy|Electricity|Solar|PV', #12\r\n 'Secondary Energy|Electricity|Wind|Offshore', #13\r\n 'Secondary Energy|Electricity|Wind|Onshore' #14\r\n ]) # list 15 IMAGE electricity industry, in EJ/yr\r\n \r\nIndustry_R = np.array(['solar photovoltaic power plant', #0\r\n 'concentrating solar power plant (CSP)', #1\r\n 'wind power plant onshore', #2\r\n 'wind power plant offshore', #3\r\n 'hydro power plant', #4\r\n 'nuclear power plant', #5\r\n 'coal power plant', #6\r\n 'coal power plant without abatement measures',#7\r\n 'bio 
powerplant', #8\r\n 'oil power plant', #9\r\n 'geothermal power plant', #10\r\n 'IGCC power plant', #11\r\n 'light oil combined cycle', #12\r\n 'gas combined cycle power plant', #13\r\n 'advanced coal power plant with CCS', #14\r\n 'coal power plant with CCS', #15\r\n 'biomass power plant with CCS', #16\r\n 'gas combined cycle power plant with CCS'#17\r\n ]) # list of 18 electricity industry in RECC\r\n\r\nInd_DictLookUp = {\r\n 'solar photovoltaic power plant': ['Secondary Energy|Electricity|Solar|PV'], \r\n 'concentrating solar power plant (CSP)': ['Secondary Energy|Electricity|Solar|CSP'],\r\n 'wind power plant onshore': ['Secondary Energy|Electricity|Wind|Onshore'], \r\n 'wind power plant offshore': ['Secondary Energy|Electricity|Wind|Offshore'], \r\n 'hydro power plant': ['Secondary Energy|Electricity|Hydro'], \r\n 'nuclear power plant': ['Secondary Energy|Electricity|Nuclear'], \r\n 'coal power plant': ['Secondary Energy|Electricity|Coal|w/o CCS'], \r\n 'coal power plant without abatement measures': [],\r\n 'bio powerplant': ['Secondary Energy|Electricity|Biomass|w/o CCS'], \r\n 'oil power plant': ['Secondary Energy|Electricity|Oil|w/ CCS', \r\n 'Secondary Energy|Electricity|Oil|w/o CCS'], \r\n 'geothermal power plant': ['Secondary Energy|Electricity|Geothermal'], \r\n 'IGCC power plant': [], \r\n 'light oil combined cycle': [], \r\n 'gas combined cycle power plant': ['Secondary Energy|Electricity|Gas|w/o CCS',], \r\n 'advanced coal power plant with CCS': [], \r\n 'coal power plant with CCS': ['Secondary Energy|Electricity|Coal|w/ CCS'], \r\n 'biomass power plant with CCS': ['Secondary Energy|Electricity|Biomass|w/ CCS'], \r\n 'gas combined cycle power plant with CCS': ['Secondary Energy|Electricity|Gas|w/ CCS']\r\n }\r\n \r\n# extract results\r\nEnergy_Total_R = np.zeros((len(Regions_R),len(RCPScen_R),len(Industry_R),len(Years_I))) #riRt\r\nEnergy_Mix_R = np.zeros((len(Regions_R),len(RCPScen_R),len(Industry_R),len(Years_I))) #riRt\r\nEnergy_Total_O = np.zeros((len(Regions_O),len(RCPScen_R),len(Industry_R),len(Years_I))) #oiRt\r\nEnergy_Mix_O = np.zeros((len(Regions_O),len(RCPScen_R),len(Industry_R),len(Years_I))) #oiRt\r\n\r\n\r\n# Collect energy with RECC classification\r\nfor r in range(0,len(Regions_R)):\r\n for R in range(0,len(RCPScen_R)):\r\n for i in range(0,len(Industry_R)):\r\n D = DF.loc[DF['Scenario'].isin(S_DictLookUp[RCPScen_R[R]]) & DF['Region'].isin(R_DictLookUp[Regions_R[r]]) & DF['Model'].isin(Model_I) & DF['Variable'].isin(Ind_DictLookUp[Industry_R[i]]) ] # extract total energy supplied, in EJ/ySL\r\n D.drop(['Model','Scenario','Region','Variable','Unit'], axis =1, inplace=True)\r\n Energy_Total_R[r,R,i,:] = D.sum(axis=0)\r\nEnergy_Total_O[0,:,:,:] = Energy_Total_R.sum(axis=0)\r\n\r\n# From total energy to annual share\r\nfor r in range(0,len(Regions_R)):\r\n for R in range(0,len(RCPScen_R)):\r\n tot_energy_R = Energy_Total_R[r,R,:,:].sum(axis=0)\r\n for i in range(0,len(Industry_R)):\r\n Energy_Mix_R[r,R,i,:] = np.divide(Energy_Total_R[r,R,i,:],tot_energy_R)\r\n\r\nfor R in range(0,len(RCPScen_R)):\r\n tot_Energy_O = Energy_Total_O[0,R,:,:].sum(axis=0)\r\n for i in range(0,len(Industry_R)):\r\n Energy_Mix_O[0,R,i,:] = np.divide(Energy_Total_O[0,R,i,:],tot_Energy_O)\r\n \r\n# interpolate\r\nEnergy_Mix_R_interp = np.zeros((len(Regions_R),len(RCPScen_R),len(Industry_R),len(Time_R))) #riRt\r\nEnergy_Mix_O_interp = np.zeros((len(Regions_O),len(RCPScen_R),len(Industry_R),len(Time_R))) #oiRt \r\n\r\nfor r in range(0,len(Regions_R)):\r\n for R in 
range(0,len(RCPScen_R)):\r\n        for i in range(0,len(Industry_R)):    \r\n            f_mix = interp1d(Years_I, Energy_Mix_R[r,R,i,:] , kind='linear')\r\n            Energy_Mix_R_interp[r,R,i,:] = f_mix(Time_R)\r\n            \r\nfor R in range(0,len(RCPScen_R)):\r\n    for i in range(0,len(Industry_R)):\r\n        f_mix = interp1d(Years_I, Energy_Mix_O[0,R,i,:] , kind='linear')\r\n        Energy_Mix_O_interp[0,R,i,:] = f_mix(Time_R)\r\n        \r\n        \r\n# export results    \r\nHeaders= ['Region','RCP_Scen','Industry']\r\nfor t in Time_R:\r\n    Headers.append(str(t))\r\n\r\nResults_R = openpyxl.Workbook()\r\nsheet_R = Results_R.active\r\nsheet_R.append(Headers)\r\nRix = 2\r\nfor r in range(0,len(Regions_R)):\r\n    for R in range(0,len(RCPScen_R)):\r\n        for i in range(0,len(Industry_R)):\r\n            sheet_R.cell(Rix,1).value = Regions_R[r]\r\n            sheet_R.cell(Rix,2).value = RCPScen_R[R]\r\n            sheet_R.cell(Rix,3).value = Industry_R[i]\r\n            for t in range(0,len(Time_R)):\r\n                sheet_R.cell(Rix,4+t).value = Energy_Mix_R_interp[r,R,i,t]\r\n            Rix +=1\r\n            \r\nResults_O = openpyxl.Workbook()\r\nsheet_O = Results_O.active\r\nsheet_O.append(Headers)\r\nRix = 2\r\nfor R in range(0,len(RCPScen_R)):\r\n    for i in range(0,len(Industry_R)):\r\n        sheet_O.cell(Rix,1).value = 'World'\r\n        sheet_O.cell(Rix,2).value = RCPScen_R[R]\r\n        sheet_O.cell(Rix,3).value = Industry_R[i]\r\n        for t in range(0,len(Time_R)):\r\n            sheet_O.cell(Rix,4+t).value = Energy_Mix_O_interp[0,R,i,t]\r\n        Rix +=1\r\n        \r\nResults_R.save('3_SHA_ElectricityMix.xlsx')\r\nResults_O.save('3_SHA_ElectricityMix_World.xlsx')\r\n\r\nprint('Done')\r\n# The end.","repo_name":"CarrerF/RECC_Materials_Scenario","sub_path":"Data_processing/Electricity_mix/parse_ElectricityMix.py","file_name":"parse_ElectricityMix.py","file_ext":"py","file_size_in_byte":11503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39730040567","text":"'''\nCreated on 09/04/2012\n\n@author: ender3\n'''\nfrom board import Board\nfrom player import Player\nfrom util.circularList import CircularList\nfrom card import Card\n\nclass Dominion(object):\n    '''\n    Class that communicates all the data from the application to the WS layer\n    '''\n    \n    MAX_PLAYERS = 4\n    STATE_OPEN = 1\n    STATE_ACTION = 2\n    STATE_BUY = 3\n    STATE_FINISH = 4\n\n    def __init__(self):\n        '''\n        Constructor\n        '''\n        self.board=Board()\n        self.order=''\n        self.admin=''\n        \n        self.players = {}\n        self.number_of_players = Dominion.MAX_PLAYERS\n        self.next_player = None\n        self.state = self.STATE_OPEN\n        self.templates = {'game' : 'dominion.html'}\n        \n    def newPlayer(self,name):\n        '''\n        Create and add a new player to the game, check if the game is full\n        '''\n        if len(self.players) >= Dominion.MAX_PLAYERS:\n            return False,\"maxPlayers\"\n        elif name in self.players:\n            return False,\"nameExists\"\n        else:\n            self.players[name]=Player(name,self.board)\n            return True\n    \n    def playerLeft(self,name):\n        '''\n        Remove a player that has left the game\n        '''\n        self.players.pop(name)\n    \n    def initGame(self):\n        '''\n        Initialize the game, check if there are enough players\n        '''\n        if len(self.players)<2:\n            return False, \"minPlayers\"\n        else:\n            self.order=CircularList(list(self.players.keys()))\n            self.board.deal_cards(len(self.players))\n            for player in self.players.values():\n                cards_deck=[]\n                (result,card_num)=self.board.get_cards(Card.ESTATE, 3)\n                if(result):\n                    for i in range(3):\n                        cards_deck.append(Card.ESTATE)\n                (result,card_num)=self.board.get_cards(Card.COPPER, 7)\n                if(result):\n                    for i in range(7):\n                        cards_deck.append(Card.COPPER)\n                
player.initialize_deck(cards_deck)\n                player.draw_cards(5)\n        self.state=self.STATE_ACTION\n        return True \n    \n    def end_action_phase(self):\n        self.state=self.STATE_BUY\n        \n    def end_buy_phase(self):\n        self.players[self.order.current()].discard_hand()\n        self.players[self.order.current()].draw_cards(5) \n        self.order.next()\n        self.state=self.STATE_ACTION\n        \n    def buy(self,card_name):\n        if card_name in self.board.board_cards:\n            if(self.players[self.order.current()].coins >= self.board.card_collection.get_card(card_name).cost):\n                if(card_name != Card.CURSE):\n                    (result,num_cards_left)=self.board.get_cards(card_name, 1)\n                    if(result):\n                        self.players[self.order.current()].add_new_card(card_name)\n                        self.players[self.order.current()].coins=self.players[self.order.current()].coins-self.board.card_collection.get_card(card_name).cost\n                        return True,card_name\n                    else:\n                        return False,\"There's no \"+card_name+\" left\"\n                else:\n                    return False,\"You cannot buy a Curse\"\n            else:\n                return False, \"You don't have enough money\"\n        else:\n            return False, \"That card is not on the board\"\n    def endGame(self):\n        winner=''\n        winner_points = -1\n        message=''\n        for player in self.players.values():\n            if (player.victory_points>winner_points):\n                winner = player.name\n                winner_points = player.victory_points\n            elif (player.victory_points==winner_points):\n                winner = winner + ' and ' + player.name\n            if (message==''):\n                message = player.name+': '+str(player.victory_points)\n            else:\n                message = message +' - ' +player.name+': '+str(player.victory_points)\n        return winner, message\n","repo_name":"luis-ibanez/Dominion","sub_path":"src/app/dominion/dominion.py","file_name":"dominion.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19185335131","text":"from collections import defaultdict\n\nclass Solution:\n    def group_anagrams(self, strs):\n        \"\"\"\n        :type strs: list[str]\n        :rtype: list[list[str]]\n        \"\"\"\n        if not strs:\n            return []\n\n        table = defaultdict(list)\n        for string in strs:\n            key = tuple(sorted(string))\n            table[key].append(string)\n\n        return list(table.values())\n\nif __name__ == '__main__':\n    test = [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]\n    sol = Solution()\n    print(sol.group_anagrams(test))\n","repo_name":"pololee/oj-leetcode","sub_path":"companies/facebook/p49/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7300038188","text":"\nimport RPi.GPIO as GPIO\nimport time\n\n#set gpio numbering mode\nGPIO.setmode(GPIO.BOARD)\n\n# LEFT EYE UP DOWN\nGPIO.setup(11,GPIO.OUT)\nservo_le_ud = GPIO.PWM(11, 50) #11 is pin, 50 = 50Hz pulse\n\n# RIGHT EYE UP DOWN\nGPIO.setup(22, GPIO.OUT)\nservo_re_ud = GPIO.PWM(22, 50)\n\n#start PWM running, but with value of 0 (pulse off)\n# servo.start(dc)\n# where dc is the duty cycle (0.0 <= dc <= 100.0)\nservo_le_ud.start(0)\nservo_re_ud.start(0)\nprint (\"Waiting for 2 seconds\")\ntime.sleep(2)\n\n# front to back 180 degree\nprint (\"front to back 45\")\nle_ud_duty = 2\nre_ud_duty = 12\nwhile le_ud_duty <= 13 and re_ud_duty >= 2:\n    servo_le_ud.ChangeDutyCycle(le_ud_duty)\n    servo_re_ud.ChangeDutyCycle(re_ud_duty)\n    time.sleep(1)\n    le_ud_duty += 2\n    re_ud_duty -= 2\n\n\n#clean things up at the end\n#doesn't move the 
servo\nservo_le_ud.stop()\nservo_re_ud.stop()\nGPIO.cleanup()\nprint(\"Goodbye.\")","repo_name":"JIbald/Doorman_raspberry","sub_path":"src/eyes_servos.py","file_name":"eyes_servos.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72866344584","text":"# ----------------------------------------\n# The dataset for train that is used for the main training\n# ----------------------------------------\n\nfrom __future__ import print_function, division\nimport os\nimport torch\nimport pandas as pd\nimport cv2\nimport multiprocessing\nfrom skimage import io\nfrom PIL import Image\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom datasets.transformmultiGT import *\nfrom utils.imutils import *\nfrom utils.registry import DATASETS\nfrom datasets.BaseMultiwGTauginfoDataset import BaseMultiwGTauginfoDataset\nimport torch.nn.functional as F\nfrom utils.iou_computation import update_iou_stat, compute_iou\n\n\n@DATASETS.register_module\nclass VOCTrainwsegDataset(BaseMultiwGTauginfoDataset):\n def __init__(self, cfg, period, transform='none'):\n super(VOCTrainwsegDataset, self).__init__(cfg, period, transform)\n self.dataset_name = 'VOC%d' % cfg.DATA_YEAR\n self.root_dir = os.path.join(cfg.ROOT_DIR, 'data', 'VOCdevkit')\n self.dataset_dir = os.path.join(self.root_dir, self.dataset_name)\n self.rst_dir = os.path.join(self.root_dir, 'results', self.dataset_name, 'Segmentation')\n self.eval_dir = os.path.join(self.root_dir, 'eval_result', self.dataset_name, 'Segmentation')\n self.img_dir = os.path.join(self.dataset_dir, 'JPEGImages')\n # print(self.img_dir)\n self.ann_dir = os.path.join(self.dataset_dir, 'Annotations')\n self.seg_dir = os.path.join(self.dataset_dir, 'SegmentationClass')\n self.seg_dir_gt = os.path.join(self.dataset_dir, 'SegmentationClassAug')\n self.set_dir = os.path.join(self.dataset_dir, 'ImageSets', 'Segmentation')\n if cfg.DATA_PSEUDO_GT:\n self.pseudo_gt_dir = cfg.DATA_PSEUDO_GT\n else:\n self.pseudo_gt_dir = os.path.join(self.root_dir, 'pseudo_gt', self.dataset_name, 'Segmentation')\n\n file_name = None\n if cfg.DATA_AUG and 'train' in self.period:\n file_name = self.set_dir + '/' + period + 'aug.txt'\n else:\n file_name = self.set_dir + '/' + period + '.txt'\n df = pd.read_csv(file_name, names=['filename'])\n self.name_list = df['filename'].values\n # print(self.name_list[1])\n if self.dataset_name == 'VOC2012':\n self.categories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',\n 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',\n 'train', 'tvmonitor']\n self.coco2voc = [[0], [5], [2], [16], [9], [44], [6], [3], [17], [62],\n [21], [67], [18], [19], [4], [1], [64], [20], [63], [7], [72]]\n\n self.num_categories = len(self.categories) + 1\n self.cmap = self.__colormap(len(self.categories) + 1)\n\n # to record the previous prediction\n self.prev_pred_dict = {}\n self.seg_dict = {}\n self.init_seg_dict()\n\n # save all the segmentation information in the mem as initialization\n def init_seg_dict(self):\n for idx in range(len(self.name_list)):\n name = self.name_list[idx]\n seg_file = self.pseudo_gt_dir + '/' + name + '.png'\n\n segmentation1 = Image.open(seg_file)\n\n segmentation1 = torch.tensor(np.array(segmentation1),dtype=torch.long).unsqueeze(0) # 1,h,w\n self.seg_dict[idx] = segmentation1\n\n # the core logic to update the pseudo annotation\n def update_seg_dict(self, IoU_npl_indx, 
mask_threshold=0.8):\n for idx in range(len(self.name_list)):\n self.update_allclass(idx, IoU_npl_indx, mask_threshold, 'single', class_constraint=True, update_or_mask='update', update_all_bg_img=True)\n\n\n def update_allclass(self, idx, IoU_npl_indx, mask_threshold, IoU_npl_constraint, class_constraint=True, update_or_mask='update', update_all_bg_img=False):\n seg_label = self.seg_dict[idx] # 1,h,w\n b, h, w = seg_label.size() # 1,h,w\n\n # if seg label does not belong to the set of class that needs to be updated (exclude the background class), return\n if set(np.unique(seg_label.numpy())).isdisjoint(set(IoU_npl_indx[1:])):\n # only the background in the pseudo label\n # if update_all_bg_img and len(np.unique(seg_label.numpy()))==1 and np.unique(seg_label.numpy())[0]==0:\n if update_all_bg_img and not (set(np.unique(seg_label.numpy()))-set(np.array([0,255]))):\n pass\n else:\n return\n\n seg_argmax, seg_prediction_max_prob = self.prev_pred_dict[idx]\n\n # if the class_constraint==True and seg label has foreground class\n # we prevent using predicted class that is not in the pseudo label to correct the label\n if class_constraint == True and (set(np.unique(seg_label[0].numpy())) - set(np.array([0, 255]))):\n for i_batch in range(b):\n unique_class = torch.unique(seg_label[i_batch])\n # print(unique_class)\n indx = torch.zeros((h, w), dtype=torch.long)\n for element in unique_class:\n indx = indx | (seg_argmax[i_batch] == element)\n seg_argmax[i_batch][(indx == 0)] = 255\n\n seg_mask_255 = (seg_argmax == 255)\n\n # seg_change_indx means which pixels need to be updated,\n # find index where prediction is different from label,\n # and it is not a ignored index and confidence is larger than threshold\n seg_change_indx = (seg_label != seg_argmax) & (~seg_mask_255) & (\n seg_prediction_max_prob > mask_threshold)\n\n # when set to \"both\", only when predicted class and pseudo label both existed in the set, the label would be corrected\n # this is a conservative way, during our whole experiments, IoU_npl_constraint is always set to be \"single\",\n # this is retained here in case user may find in useful for their dataset\n if IoU_npl_constraint == 'both':\n class_indx_seg_argmax = torch.zeros((b, h, w), dtype=torch.bool)\n class_indx_seg_label = torch.zeros((b, h, w), dtype=torch.bool)\n\n for element in IoU_npl_indx:\n class_indx_seg_argmax = class_indx_seg_argmax | (seg_argmax == element)\n class_indx_seg_label = class_indx_seg_label | (seg_label == element)\n seg_change_indx = seg_change_indx & class_indx_seg_label & class_indx_seg_argmax\n\n # when set to \"single\", when predicted class existed in the set, the label would be corrected, no need to consider pseudo label\n # e.g. 
when person belongs to the set, motor pixels in the pseudo label can be updated to person even if motor is not in set\n elif IoU_npl_constraint == 'single':\n class_indx_seg_argmax = torch.zeros((b, h, w), dtype=torch.bool)\n\n for element in IoU_npl_indx:\n class_indx_seg_argmax = class_indx_seg_argmax | (seg_argmax == element)\n seg_change_indx = seg_change_indx & class_indx_seg_argmax\n\n # if the foreground class portion is too small, do not update\n seg_label_clone = seg_label.clone()\n seg_label_clone[seg_change_indx] = seg_argmax[seg_change_indx]\n if torch.sum(seg_label_clone!=0) < 0.5 * torch.sum(seg_label!=0) and torch.sum(seg_label_clone==0)/(b*h*w)>0.95:\n return\n\n # update or mask 255\n if update_or_mask == 'update':\n seg_label[seg_change_indx] = seg_argmax[seg_change_indx] # update all class of the pseudo label\n else:\n # mask the pseudo label for 255 without computing the loss\n seg_label[seg_change_indx] = (torch.ones((b, h, w), dtype=torch.long) * 255)[\n seg_change_indx] # the updated pseudo label\n\n self.seg_dict[idx] = seg_label\n\n def __len__(self):\n return len(self.name_list)\n\n def __getitem__(self, idx):\n sample = self.__sample_generate__(idx)\n if 'segmentation' in sample.keys():\n sample['mask'] = sample['segmentation'] < self.num_categories\n t = sample['segmentation'].copy()\n t[t >= self.num_categories] = 0\n sample['segmentation_onehot'] = onehot(t, self.num_categories)\n return self.totensor(sample)\n\n def __sample_generate__(self, idx, split_idx=0):\n name = self.load_name(idx)\n image = self.load_image(idx)\n r, c, _ = image.shape\n sample = {'image': image, 'name': name, 'row': r, 'col': c, 'batch_idx': idx}\n\n if 'test' in self.period:\n return self.__transform__(sample)\n # elif self.cfg.DATA_PSEUDO_GT and idx >= split_idx and 'train' in self.period:\n # segmentation, segmentation2, segmentation3, seg_gt = self.load_pseudo_segmentation(idx)\n # else:\n # segmentation = self.load_segmentation(idx)\n segmentation2, seg_gt = self.load_pseudo_segmentation(idx)\n\n segmentation = self.seg_dict[idx][0].numpy() #h,w\n\n sample['segmentation'] = segmentation\n sample['segmentation2'] = segmentation2\n t = sample['segmentation'].copy()\n t[t >= self.num_categories] = 0\n sample['category'] = seg2cls(t, self.num_categories)\n sample['category_copypaste'] = np.zeros(sample['category'].shape)\n\n sample['segmentationgt'] = seg_gt\n\n if self.transform == 'none' and self.cfg.DATA_FEATURE_DIR:\n feature = self.load_feature(idx)\n sample['feature'] = feature\n return self.__transform__(sample)\n\n def load_name(self, idx):\n name = self.name_list[idx]\n return name\n\n def load_image(self, idx):\n name = self.name_list[idx]\n img_file = self.img_dir + '/' + name + '.jpg'\n image = cv2.imread(img_file)\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image_rgb\n\n def load_segmentation(self, idx):\n name = self.name_list[idx]\n seg_file = self.seg_dir + '/' + name + '.png'\n segmentation = np.array(Image.open(seg_file))\n return segmentation\n\n def load_pseudo_segmentation(self, idx):\n name = self.name_list[idx]\n seg_file = self.pseudo_gt_dir + '/' + name + '.png'\n segmentation1 = Image.open(seg_file)\n width, height = segmentation1.size\n segmentation1 = np.array(segmentation1)\n\n seg_gt_file = self.seg_dir_gt + '/' + name + '.png'\n seg_gt = np.array(Image.open(seg_gt_file).resize((width, height)))\n\n return segmentation1, seg_gt\n\n\n\n def __colormap(self, N):\n \"\"\"Get the map from label index to color\n\n Args:\n N: number of 
class\n\n return: a Nx3 matrix\n\n \"\"\"\n cmap = np.zeros((N, 3), dtype=np.uint8)\n\n def uint82bin(n, count=8):\n \"\"\"returns the binary of integer n, count refers to amount of bits\"\"\"\n return ''.join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])\n\n for i in range(N):\n r = 0\n g = 0\n b = 0\n idx = i\n for j in range(7):\n str_id = uint82bin(idx)\n r = r ^ (np.uint8(str_id[-1]) << (7 - j))\n g = g ^ (np.uint8(str_id[-2]) << (7 - j))\n b = b ^ (np.uint8(str_id[-3]) << (7 - j))\n idx = idx >> 3\n cmap[i, 0] = r\n cmap[i, 1] = g\n cmap[i, 2] = b\n return cmap\n\n def load_ranked_namelist(self):\n df = self.read_rank_result()\n self.name_list = df['filename'].values\n\n def label2colormap(self, label):\n m = label.astype(np.uint8)\n r, c = m.shape\n cmap = np.zeros((r, c, 3), dtype=np.uint8)\n cmap[:, :, 0] = (m & 1) << 7 | (m & 8) << 3\n cmap[:, :, 1] = (m & 2) << 6 | (m & 16) << 2\n cmap[:, :, 2] = (m & 4) << 5\n cmap[m == 255] = [255, 255, 255]\n return cmap\n\n def save_result(self, result_list, model_id):\n \"\"\"Save test results\n\n Args:\n result_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]\n\n \"\"\"\n folder_path = os.path.join(self.rst_dir, '%s_%s' % (model_id, self.period))\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n for sample in result_list:\n file_path = os.path.join(folder_path, '%s.png' % sample['name'])\n cv2.imwrite(file_path, sample['predict'])\n\n def save_pseudo_gt(self, result_list, folder_path=None):\n \"\"\"Save pseudo gt\n\n Args:\n result_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]\n\n \"\"\"\n i = 1\n folder_path = self.pseudo_gt_dir if folder_path is None else folder_path\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n for sample in result_list:\n file_path = os.path.join(folder_path, '%s.png' % (sample['name']))\n cv2.imwrite(file_path, sample['predict'])\n i += 1\n\n def do_matlab_eval(self, model_id):\n import subprocess\n path = os.path.join(self.root_dir, 'VOCcode')\n eval_filename = os.path.join(self.eval_dir, '%s_result.mat' % model_id)\n cmd = 'cd {} && '.format(path)\n cmd += 'matlab -nodisplay -nodesktop '\n cmd += '-r \"dbstop if error; VOCinit; '\n cmd += 'VOCevalseg(VOCopts,\\'{:s}\\');'.format(model_id)\n cmd += 'accuracies,avacc,conf,rawcounts = VOCevalseg(VOCopts,\\'{:s}\\'); '.format(model_id)\n cmd += 'save(\\'{:s}\\',\\'accuracies\\',\\'avacc\\',\\'conf\\',\\'rawcounts\\'); '.format(eval_filename)\n cmd += 'quit;\"'\n\n print('start subprocess for matlab evaluation...')\n print(cmd)\n subprocess.call(cmd, shell=True)\n\n def do_python_eval(self, model_id):\n predict_folder = os.path.join(self.rst_dir, '%s_%s' % (model_id, self.period))\n gt_folder = self.seg_dir\n TP = []\n P = []\n T = []\n for i in range(self.num_categories):\n TP.append(multiprocessing.Value('i', 0, lock=True))\n P.append(multiprocessing.Value('i', 0, lock=True))\n T.append(multiprocessing.Value('i', 0, lock=True))\n\n def compare(start, step, TP, P, T):\n for idx in range(start, len(self.name_list), step):\n # print('%d/%d'%(idx,len(self.name_list)))\n name = self.name_list[idx]\n predict_file = os.path.join(predict_folder, '%s.png' % name)\n gt_file = os.path.join(gt_folder, '%s.png' % name)\n predict = np.array(Image.open(predict_file)) # cv2.imread(predict_file)\n gt = np.array(Image.open(gt_file))\n cal = gt < 255\n mask = (predict == gt) * cal\n\n for i in range(self.num_categories):\n P[i].acquire()\n P[i].value += np.sum((predict == i) * cal)\n 
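Condensing the `update_allclass` rule from earlier in this VOCTrainwsegDataset record: a pixel of the cached pseudo label is rewritten only when the new prediction disagrees with it, is not the ignore index, clears the confidence threshold, and belongs to a trusted (high-IoU) class. A sketch with the original's tensor names simplified:

```python
import torch

def correct_labels(label, pred, prob, trusted_classes, thresh=0.8, ignore=255):
    in_trusted = torch.zeros_like(pred, dtype=torch.bool)
    for c in trusted_classes:                     # e.g. the IoU_npl_indx set
        in_trusted |= (pred == c)
    change = (label != pred) & (pred != ignore) & (prob > thresh) & in_trusted
    out = label.clone()
    out[change] = pred[change]                    # 'update' mode; 'mask' writes 255 instead
    return out
```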
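`do_python_eval` around this point accumulates per-class true positives (TP), predicted pixels (P), and ground-truth pixels (T) across worker processes and derives IoU as TP / (T + P - TP). The formula in scalar form:

```python
def iou(tp, p, t, eps=1e-10):
    # intersection = tp; union = predicted + ground truth - intersection
    return tp / (t + p - tp + eps)

print(iou(tp=80, p=100, t=90))  # ~0.727
```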
P[i].release()\n T[i].acquire()\n T[i].value += np.sum((gt == i) * cal)\n T[i].release()\n TP[i].acquire()\n TP[i].value += np.sum((gt == i) * mask)\n TP[i].release()\n\n p_list = []\n for i in range(8):\n p = multiprocessing.Process(target=compare, args=(i, 8, TP, P, T))\n p.start()\n p_list.append(p)\n for p in p_list:\n p.join()\n IoU = []\n for i in range(self.num_categories):\n IoU.append(TP[i].value / (T[i].value + P[i].value - TP[i].value + 1e-10))\n loglist = {}\n for i in range(self.num_categories):\n if i == 0:\n print('%11s:%7.3f%%' % ('background', IoU[i] * 100), end='\\t')\n loglist['background'] = IoU[i] * 100\n else:\n if i % 2 != 1:\n print('%11s:%7.3f%%' % (self.categories[i - 1], IoU[i] * 100), end='\\t')\n else:\n print('%11s:%7.3f%%' % (self.categories[i - 1], IoU[i] * 100))\n loglist[self.categories[i - 1]] = IoU[i] * 100\n\n miou = np.mean(np.array(IoU))\n print('\\n======================================================')\n print('%11s:%7.3f%%' % ('mIoU', miou * 100))\n loglist['mIoU'] = miou * 100\n return loglist\n\n def do_python_eval_batch_pseudo_one_process(self):\n self.seg_dir_gt = os.path.join(self.dataset_dir, 'SegmentationClassAug')\n gt_folder = self.seg_dir_gt\n TP_gt_epoch = [0] * 21\n P_gt_epoch = [0] * 21\n T_gt_epoch = [0] * 21\n loglist = {}\n for idx in range(len(self.name_list)):\n # print(idx)\n name = self.name_list[idx]\n gt_file = os.path.join(gt_folder, '%s.png' % name)\n gt = np.array(Image.open(gt_file))\n r, c = gt.shape\n predict = self.seg_dict[idx].cpu().numpy()\n\n TP_gt_epoch, P_gt_epoch, T_gt_epoch = update_iou_stat(predict, gt, TP_gt_epoch,\n P_gt_epoch, T_gt_epoch)\n IoU_gt_epoch = compute_iou(TP_gt_epoch, P_gt_epoch, T_gt_epoch)\n for indx, class_name in enumerate(\n ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow',\n 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',\n 'tvmonitor']):\n loglist[class_name] = IoU_gt_epoch[indx]\n mIoU_clean_epoch = np.mean(np.array(IoU_gt_epoch))\n loglist['mIoU'] = mIoU_clean_epoch\n return loglist\n\n def __coco2voc(self, m):\n r, c = m.shape\n result = np.zeros((r, c), dtype=np.uint8)\n for i in range(0, 21):\n for j in self.coco2voc[i]:\n result[m == j] = i\n return result\n","repo_name":"Kangningthu/ADELE","sub_path":"lib/datasets/VOCTrainwsegDataset.py","file_name":"VOCTrainwsegDataset.py","file_ext":"py","file_size_in_byte":17945,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"81"} +{"seq_id":"36040401690","text":"import re\n\ndef id_validator(prompt):\n id = input(prompt)\n if bool(re.match('AVpe_{1}[-_A-Za-z0-9]{15}', id))!=True:\n id = input(prompt)\n return id\n\ndef categories_validator(prompt, sep=\",\"):\n categories = input(prompt)\n if bool(re.match(\"(([A-Z]{1}[a-z`' ]{1,}\\,{0,1}){1,}|(none|None))\", categories))!=True:\n categories = input(prompt)\n if isinstance(categories,str):\n categories = categories.split(sep)\n return categories\n\ndef keys_validator(prompt, sep=','):\n keys = input(prompt)\n if bool(re.match('([a-z\\/,.0-9]{1,}|(none|None))', keys))!=True:\n keys = input(prompt)\n if isinstance(keys,str):\n keys = keys.split(sep)\n return keys\n\ndef name_validator(prompt):\n name = input(prompt)\n if bool(re.match(\"(([A-Z]{1}[A-Za-z'().,/-0-9 ]{1,}\\s{0,1}){1,}|(none|None))\",name))!=True:\n name = input(prompt)\n return 
name\n","repo_name":"igortereshchenko/datascience","sub_path":"hudymvm/validator/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35660404126","text":"#!/usr/local/bin/python\r\nimport datetime\r\nimport os\r\n\r\nfrom reportlab.lib import colors, styles\r\nfrom reportlab.lib.enums import TA_JUSTIFY, TA_RIGHT\r\nfrom reportlab.lib.pagesizes import A4, inch, landscape\r\nfrom reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph, Spacer\r\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\r\n\r\n\r\nclass Bill():\r\n def __init__(self, service, part, cust, vehicle, emp):\r\n print('bill class')\r\n doc = SimpleDocTemplate(\"test_report_lab.pdf\", pagesize=A4, rightMargin=30, leftMargin=30, topMargin=30,\r\n bottomMargin=18)\r\n # doc.pagesize = landscape(A4)\r\n styles = getSampleStyleSheet()\r\n styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))\r\n styles.add(ParagraphStyle(name='Right', alignment=TA_RIGHT))\r\n elements = []\r\n\r\n total = 0\r\n cust_veh = [\r\n [Paragraph(\"Customer ID : {0}\".format(cust[0]), styles[\"Normal\"]),\r\n Paragraph(\"Service ID : {0}\".format(service[0]), styles[\"Normal\"])],\r\n [Paragraph(\"Name : {0}\".format(cust[1]), styles[\"Normal\"]),\r\n Paragraph(\"Job Desc. : {0}\".format(service[1]), styles[\"Normal\"])],\r\n [Paragraph(\"Address : {0}\".format(cust[2]), styles[\"Normal\"]),\r\n Paragraph(\"Job Date : {0}\".format(service[2]), styles[\"Normal\"])],\r\n [Paragraph(\"Mobile : {0}\".format(cust[3]), styles[\"Normal\"]),\r\n Paragraph(\"Distance : {0}\".format(service[3]), styles[\"Normal\"])],\r\n [Paragraph(\"Email : {0}\".format(cust[4]), styles[\"Normal\"])],\r\n [Paragraph(\"Vehicle ID : {0}\".format(vehicle[0]), styles[\"Normal\"]),\r\n Paragraph(\"Service Advisor : {0}\".format(emp[0]), styles[\"Normal\"])],\r\n [Paragraph(\"Reg. 
no : {0}\".format(vehicle[1]), styles[\"Normal\"]),\r\n Paragraph(\"SA Contact no : {0}\".format(emp[1]), styles[\"Normal\"])],\r\n [Paragraph(\"Company : {0}\".format(vehicle[2]), styles[\"Normal\"])],\r\n [Paragraph(\"Model : {0}\".format(vehicle[3]), styles[\"Normal\"])],\r\n [Paragraph(\"Type : {0}\".format(vehicle[4]), styles[\"Normal\"])]\r\n ]\r\n csv = TableStyle([('VALIGN', (0, 0), (-1, -1), 'TOP')])\r\n table_cust = Table(cust_veh)\r\n table_cust.setStyle(csv)\r\n data = [[\"Sr \\nno.\", \"Part ID\", \"Part\", \"Rate\", 'Quantity', \"Amount\"]]\r\n i = 1\r\n for x in part:\r\n s = [str(i)]\r\n for a in x:\r\n s.append(str(a))\r\n total += x[4]\r\n data.append(s)\r\n i += 1\r\n data.append(['', '', '', '', 'Total Amount', str(total)])\r\n\r\n # TODO: Get this line right instead of just copying it from the docs\r\n style = TableStyle([('ALIGN', (1, 1), (-2, -2), 'RIGHT'),\r\n ('VALIGN', (0, 0), (0, -1), 'TOP'),\r\n ('ALIGN', (0, -1), (-1, -1), 'CENTER'),\r\n ('VALIGN', (0, -1), (-1, -1), 'MIDDLE'),\r\n ('BOX', (0, 0), (-1, -2), 0.25, colors.black),\r\n ('BOX', (0, 0), (5, 0), 0.25, colors.black),\r\n ('BOX', (0, -1), (-1, -1), 0.25, colors.black)\r\n ])\r\n\r\n # Configure style and word wrap\r\n s = getSampleStyleSheet()\r\n s = s[\"BodyText\"]\r\n s.wordWrap = 'CJK'\r\n data2 = [[Paragraph(cell, s) for cell in row] for row in data]\r\n t = Table(data2, colWidths=[0.5*inch, None])\r\n t.setStyle(style)\r\n sign_style = TableStyle([('ALIGN', (0, 0), (0, 0), 'LEFT'),\r\n ('ALIGN', (1, 0), (1, 0), 'RIGHT')\r\n ])\r\n sign = [[Paragraph(\"(Customer Signature)\", styles[\"Normal\"]),\r\n Paragraph(\"(Authorized Signature)\", styles[\"Right\"])]]\r\n sign_table = Table(sign)\r\n sign_table.setStyle(sign_style)\r\n\r\n # Send the data and build the file\r\n now = datetime.datetime.now()\r\n now = \"Print Date: {0} Timing: {1}\".format(now.strftime(\"%Y-%b-%d\"),\r\n now.strftime(\"%H:%M:%S\"))\r\n elements.append(Paragraph(now, styles[\"Right\"]))\r\n elements.append(table_cust)\r\n elements.append(Spacer(1, 10))\r\n elements.append(t)\r\n elements.append(Spacer(1, 70))\r\n elements.append(sign_table)\r\n doc.build(elements)\r\n os.startfile(\"test_report_lab.pdf\")\r\n\r\n\r\n# b = Bill((2, 'Maintenance', datetime.date(2018, 10, 1), 6769, 'Dent on left', 2782.0, 5, 1),\r\n# [(1, 'Bumper Front', 1355.0, 1, 1355.0), (5, 'Engine oil', 450.0, 1, 450.0),\r\n# (3, 'High-pitched Horn', 665.0, 1, 665.0), (4, 'Windshield Wiper', 156.0, 2, 312.0)],\r\n# (2, 'Rushi', 'Chemburasfn nansofnfas asin oaisnf a nsfoains n oi asnfo iansfo in', 9563272653, 'rushipowar@gmail.com'),\r\n# (5, 'MH31W2657', 'Renalt', 'Duster', 'SUV'),\r\n# ('Ashish', 8615742365))\r\n","repo_name":"bhaveshgadag/vehicle_management_python","sub_path":"invoicedemo.py","file_name":"invoicedemo.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10857412457","text":"class Solution:\n def plusOne(self, digits):\n \"\"\"\n Runtime : faster than 61.90% of Python3\n Memory Usage : less than 5.29% of Python3\n \"\"\"\n digits = list(map(str, digits))\n nums = int(''.join(digits))\n nums += 1\n nums = list(str(nums))\n return list(map(int, nums))\n \ns = 
Solution()\nprint(s.plusOne([1,2,3]))\nprint(s.plusOne([4,3,2,1]))\n","repo_name":"dongzooo/Coding-test-with-Py","sub_path":"구현/leetcode66.py","file_name":"leetcode66.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"74659572744","text":"from django.contrib.auth import get_user_model\nfrom rest_framework.test import APITestCase\n\nfrom posts.models import Post, PostLike\nfrom posts.services import do_like, do_unlike\n\n\nclass PostLikeTestCase(APITestCase):\n    def setUp(self):\n        self.user1 = get_user_model().objects.create(\n            username='testuser',\n            password='123123',\n        )\n        self.client.force_login(self.user1)\n\n        self.post = Post.objects.create(text='Test post content 1', creator_id=self.user1.id)\n\n    def test_do_like(self):\n        like = do_like(post_id=self.post.id, creator_id=self.user1.id)\n\n        # 'id' and 'created_at' are generated by the service, so assert the\n        # fields under our control and that the like was actually persisted\n        # (the original compared two dicts both copied from `like`, which\n        # made half of the assertion tautological).\n        self.assertEqual(like.creator_id, self.user1.id)\n        self.assertEqual(like.post_id, self.post.id)\n        self.assertTrue(PostLike.objects.filter(id=like.id).exists())\n\n    def test_do_unlike(self):\n        like = do_like(post_id=self.post.id, creator_id=self.user1.id)\n        do_unlike(post_id=self.post.id, creator_id=self.user1.id)\n        remaining = PostLike.objects.filter(id=like.id).first()\n\n        self.assertIsNone(remaining)\n","repo_name":"tvoyserezha/simple_social_network","sub_path":"posts/tests/test_like.py","file_name":"test_like.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"71619520904","text":"\"\"\"\nexternal_query_many.py\n\nRuns external_query.py for each cell line in the corpus and optionally also\ncomputes introspect. 
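An aside on the leetcode66 record that closed just above: its `plusOne` round-trips through `int(''.join(...))`, which works but allocates intermediate strings. A carry-based alternative stays inside the digit list:

```python
def plus_one(digits):
    for i in range(len(digits) - 1, -1, -1):
        if digits[i] < 9:
            digits[i] += 1        # no carry needed, done
            return digits
        digits[i] = 0             # a 9 rolls over; carry moves left
    return [1] + digits           # all nines, e.g. [9, 9] -> [1, 0, 0]

print(plus_one([1, 2, 3]))        # [1, 2, 4]
print(plus_one([9, 9]))           # [1, 0, 0]
```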
Most of the arguments come from the config file.\nThe default config file points to the latest signature and similarity\ndirectories.\n\n\"\"\"\n\nimport ConfigParser\nimport argparse\nimport datetime\nimport logging\nimport os\nimport sys\nimport traceback\n\nimport cmapPy.pandasGEXpress.parse as parse\nimport cmapPy.pandasGEXpress.write_gct as wg\nimport cmapPy.pandasGEXpress.concat as cg\n\nimport broadinstitute_psp.external_query.external_query as eq\nimport broadinstitute_psp.introspect.introspect as introspect\nimport broadinstitute_psp.utils.setup_logger as setup_logger\n\n__author__ = \"Lev Litichevskiy\"\n__email__ = \"lev@broadinstitute.org\"\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\nINTERNAL_GCT_FORMAT = \"{assay}_{cell}_DIFF.gct\"\nBG_GCT_FORMAT = \"{assay}_{cell}_SIM.gct\"\nOUT_STEEP_FORMAT = \"{cell}_SIM.gct\"\nOUT_SIP_FORMAT = \"{cell}_CONN.gct\"\nOUT_CONCATED_NAME = \"CONCATED_CONN.gct\"\nOUT_INTROSPECT_NAME = \"INTROSPECT_CONN.gct\"\n\n\ndef build_parser():\n \"\"\"Build argument parser.\"\"\"\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # Required args\n parser.add_argument(\"--assay\", \"-a\", required=True, choices=[\"GCP\", \"P100\"],\n help=\"which assay's data to query\")\n parser.add_argument(\"--introspect\", \"-i\", action=\"store_true\", default=False,\n help=\"whether to also compute introspect\")\n parser.add_argument(\"--external_gct_path\", \"-e\", required=True,\n help=\"path to gct file of external profiles\")\n parser.add_argument(\"--out_dir\", \"-o\", required=True,\n help=\"directory in which to dump output\")\n parser.add_argument(\"--psp_on_clue_config_path\", \"-p\",\n default=\"clue/psp_on_clue.yml\",\n help=\"filepath to psp_on_clue.yml\")\n parser.add_argument(\"--fields_to_aggregate_for_external_profiles\", \"-fae\",\n nargs=\"*\", default=[\"pert_id\", \"cell_id\", \"pert_time\"],\n help=\"list of metadata fields to use in aggregating replicates in external profiles\")\n parser.add_argument(\"--all\", default=False, action=\"store_true\",\n help=\"whether to produce all output matrices\")\n\n # Optional args\n parser.add_argument(\"--verbose\", \"-v\", action=\"store_true\", default=False,\n help=\"whether to increase the # of messages reported\")\n\n return parser\n\n\ndef main(args):\n\n # Record start_time\n start_time = datetime.datetime.now()\n start_time_msg = \"external_query_many.py started at {}\".format(\n start_time.strftime('%Y-%m-%d %H:%M:%S'))\n\n # Create output directory\n assert os.path.exists(args.out_dir), \"args.out_dir: {}\".format(args.out_dir)\n\n try:\n\n # Read and unpack config file\n (cells, internal_gct_dir, bg_gct_dir,\n fields_to_aggregate_for_internal_profiles, similarity_metric,\n connectivity_metric) = read_config_file(args.psp_on_clue_config_path)\n\n # Read in the external profiles only once\n external_gct = parse.parse(args.external_gct_path)\n\n # If requested, do introspect\n (_, introspect_gct) = introspect.do_steep_and_sip(\n external_gct, similarity_metric, connectivity_metric,\n args.fields_to_aggregate_for_external_profiles)\n\n # Write introspect result\n actual_out_introspect_name = os.path.join(args.out_dir, OUT_INTROSPECT_NAME)\n wg.write(introspect_gct, actual_out_introspect_name, data_null=\"NaN\", metadata_null=\"NaN\", filler_null=\"NaN\")\n\n # Initialize list to store connectivity gcts\n list_of_conn_gcts = []\n\n # Loop over cell lines in corpus\n for cell in cells:\n\n # Import gct with the internal 
profiles for this cell line\n internal_gct_path = os.path.join(internal_gct_dir, INTERNAL_GCT_FORMAT.format(\n assay=args.assay, cell=cell))\n internal_gct = parse.parse(internal_gct_path)\n\n # Import gct with the similarity matrix for this cell line\n bg_gct_path = os.path.join(bg_gct_dir, BG_GCT_FORMAT.format(\n assay=args.assay, cell=cell))\n bg_gct = parse.parse(bg_gct_path)\n\n (sim_gct, conn_gct) = eq.do_steep_and_sip(\n external_gct, internal_gct, bg_gct, \"spearman\",\n \"ks_test\", args.fields_to_aggregate_for_external_profiles,\n fields_to_aggregate_for_internal_profiles)\n\n # Append this connectivity gct\n list_of_conn_gcts.append(conn_gct)\n\n # Write all output gcts if requested\n if args.all:\n out_steep_name = os.path.join(args.out_dir, OUT_STEEP_FORMAT.format(cell=cell))\n out_sip_name = os.path.join(args.out_dir, OUT_SIP_FORMAT.format(cell=cell))\n\n wg.write(sim_gct, out_steep_name)\n wg.write(conn_gct, out_sip_name)\n\n # Concatenate connectivity GCTs\n concated = cg.vstack(list_of_conn_gcts)\n actual_out_concated_name = os.path.join(args.out_dir, OUT_CONCATED_NAME)\n\n # Write concatenated result\n wg.write(concated, actual_out_concated_name, data_null=\"NaN\", filler_null=\"NaN\", metadata_null=\"NaN\")\n\n # Write success.txt with timestamp\n success_path = os.path.join(args.out_dir, \"success.txt\")\n write_success(success_path, start_time_msg)\n\n # Return how much time it took\n end_time = datetime.datetime.now()\n seconds_elapsed = (end_time - start_time).seconds\n logger.info(\"external_query_many.py completed in {:.0f} sec.\".format(seconds_elapsed))\n\n except Exception:\n failure_path = os.path.join(args.out_dir, \"failure.txt\")\n msg = \"external_query_many.py failed. See {} for stacktrace.\".format(failure_path)\n\n # Write failure.txt\n write_failure(failure_path, start_time_msg)\n\n # Raise exception\n logger.error(msg)\n raise Exception(msg)\n\n return None\n\n\ndef read_config_file(config_path):\n\n assert os.path.exists(config_path), (\n \"Config file can't be found. 
config_path: {}\".format(config_path))\n\n # Read config file\n config_parser = ConfigParser.RawConfigParser()\n config_parser.read(config_path)\n\n # Return config fields as dictionarires\n config_corpus = dict(config_parser.items(\"corpus\"))\n config_metadata = dict(config_parser.items(\"metadata\"))\n config_algorithms = dict(config_parser.items(\"algorithms\"))\n\n # Unpack the config file\n cells = eval(config_corpus[\"cells\"])\n internal_gct_dir = config_corpus[\"signature_dir\"]\n bg_gct_dir = config_corpus[\"sim_dir\"]\n fields_to_aggregate_for_internal_profiles = eval(config_metadata[\"fields_to_aggregate_for_internal_profiles\"])\n similarity_metric = config_algorithms[\"similarity_metric\"]\n connectivity_metric = config_algorithms[\"connectivity_metric\"]\n\n return cells, internal_gct_dir, bg_gct_dir, \\\n fields_to_aggregate_for_internal_profiles, \\\n similarity_metric, connectivity_metric\n\n\ndef write_success(file_name, start_time_msg):\n # Create timestamp\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n # Write timestamp to file_name\n f = open(file_name, 'w')\n f.write(start_time_msg + \"\\n\")\n f.write(\"external_query_many.py completed at {}\\n\".format(timestamp))\n f.close()\n\n\ndef write_failure(file_name, start_time_msg):\n # Record stacktrace\n _, _, exc_traceback = sys.exc_info()\n\n # Write stacktrace to file_name\n f = open(file_name, \"w\")\n f.write(start_time_msg + \"\\n\")\n traceback.print_exc(exc_traceback, file=f)\n f.close()\n\n\nif __name__ == \"__main__\":\n args = build_parser().parse_args(sys.argv[1:])\n setup_logger.setup(verbose=args.verbose)\n\n main(args)\n","repo_name":"cmap/psp","sub_path":"broadinstitute_psp/external_query/external_query_many.py","file_name":"external_query_many.py","file_ext":"py","file_size_in_byte":7992,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"41238105593","text":"\"\"\"\nUtility functions for running commands at most once, and capturing their logs to a file.\n\"\"\"\nimport subprocess\nfrom rlscope.profiler.rlscope_logging import logger\nimport contextlib\nimport textwrap\n\nimport shlex\nimport pprint\nimport re\nimport sys\nimport os\nfrom os.path import join as _j, abspath as _a, dirname as _d, exists as _e, basename as _b\n\n# from rlscope.parser.common import *\nfrom rlscope.profiler.util import get_stacktrace\nfrom rlscope.profiler.util import print_cmd\n\nimport tempfile\n\n@contextlib.contextmanager\ndef in_directory(directory, allow_none=True):\n assert not( not allow_none and directory is None )\n if directory is not None:\n original_directory = os.getcwd()\n try:\n if directory is not None:\n os.chdir(directory)\n yield\n finally:\n if directory is not None:\n os.chdir(original_directory)\n\ndef tee(cmd, to_file,\n cwd=None,\n append=False,\n makedirs=True,\n check=True,\n dry_run=False,\n tee_output=True,\n tee_cmd=None,\n tee_prefix=None,\n only_show_env=None,\n **kwargs):\n \"\"\"\n Run shell *cmd*, outputting its results to *to_file*, and echo-ing the output to ``sys.stdout``.\n\n Arguments\n ---------\n cmd: List[str]\n Shell command.\n to_file: str\n Save *cmd* output to this file.\n cwd: str\n Directory to run command from.\n append: bool\n Append to *to_file*.\n makedirs: bool\n check: bool\n dry_run: bool\n tee_output: bool\n tee_prefix: str\n only_show_env: Set[str]\n \"\"\"\n\n # In case there are int's or float's in cmd.\n cmd = [str(opt) for opt in cmd]\n\n if tee_cmd is None:\n tee_cmd = 
tee_output\n\n if dry_run:\n if tee_cmd:\n with in_directory(cwd):\n print_cmd(cmd, files=[sys.stdout], env=kwargs.get('env', None), dry_run=dry_run, only_show_env=only_show_env)\n return\n\n with ScopedLogFile(to_file, append=append, makedirs=makedirs) as f:\n with in_directory(cwd):\n if tee_cmd:\n files = [sys.stdout, f]\n else:\n files = [f]\n print_cmd(cmd, files=files, env=kwargs.get('env', None), only_show_env=only_show_env)\n\n # NOTE: Regarding the bug mentioned below, using p.communicate() STILL hangs.\n #\n # p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n # with p:\n # outs, errs = p.communicate()\n # sys.stdout.write(outs)\n # sys.stdout.write(errs)\n # sys.stdout.flush()\n #\n # f.write(outs)\n # f.write(errs)\n # f.flush()\n\n debug = False\n\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs)\n with p:\n # NOTE: if this blocks it may be because there's a zombie utilization_sampler.py still running\n # (that was created by the training script) that hasn't been terminated!\n # for line in p.stdout:\n\n while True:\n\n if debug:\n logger.info(\"RUN [05]: p.poll()\")\n rc = p.poll()\n if rc is not None:\n break\n\n # BUG: SOMETIMES (I don't know WHY), this line will HANG even after\n # train_stable_baselines.sh is terminated (it shows up as a Zombie process in htop/top).\n # Sadly, this only happens occasionally, and I have yet to understand WHY it happens.\n if debug:\n logger.info(\"RUN [06]: line = p.stdout.readline()\")\n line = p.stdout.readline()\n\n # b'\\n'-separated lines\n if debug:\n logger.info(\"RUN [07]: line.decode\")\n line = line.decode(\"utf-8\")\n\n if debug:\n logger.info(\"RUN [08]: line (\\\"{line}\\\") == '' (result={result})\".format(\n result=(line == ''),\n line=line))\n if line == '':\n break\n\n if re.search(r'> train\\.py has exited', line):\n pass\n # logger.info(\"> ENABLE TEE DEBUGGING\")\n # debug = True\n\n if debug:\n logger.info(\"RUN [01]: sys.stdout.write(line)\")\n if tee_output:\n if tee_prefix is not None:\n sys.stdout.write(tee_prefix)\n sys.stdout.write(line)\n if debug:\n logger.info(\"RUN [02]: sys.stdout.flush()\")\n if tee_output:\n sys.stdout.flush()\n\n if debug:\n logger.info(\"RUN [03]: f.write(line)\")\n f.write(line)\n if debug:\n logger.info(\"RUN [04]: f.flush()\")\n f.flush()\n if tee_output:\n sys.stdout.flush()\n f.flush()\n\n if check and p.returncode != 0:\n raise subprocess.CalledProcessError(p.returncode, p.args)\n return p\n\nEXPERIMENT_SUCCESS_LINE = \"IML BENCH DONE\"\n\nclass ScopedLogFile:\n def __init__(self, file, append=False, makedirs=True):\n self.file = file\n self.append = append\n self.makedirs = makedirs\n\n def __enter__(self):\n if self._is_path:\n # if self.append:\n # self.mode = 'ab'\n # else:\n # self.mode = 'wb'\n\n if self.append:\n self.mode = 'a'\n else:\n self.mode = 'w'\n if self.makedirs:\n # logger.info(\"mkdirs {path}\".format(path=_d(self.file)))\n os.makedirs(_d(self.file), exist_ok=True)\n # logger.info(\"ScopedLogFile.file = {path}\".format(path=self.file))\n self.f = open(self.file, self.mode)\n return self.f\n else:\n # We can only append to a stream.\n self.f = self.file\n return self.f\n\n @property\n def _is_path(self):\n return type(self.file) == str\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.f.flush()\n if self._is_path:\n self.f.close()\n\ndef expr_run_cmd(cmd, to_file,\n cwd=None,\n env=None,\n replace=False,\n dry_run=False,\n skip_error=False,\n tee_output=True,\n tee_cmd=None,\n 
tee_prefix=None,\n # extra_argv=[],\n only_show_env=None,\n debug=False,\n raise_exception=False,\n exception_class=None,\n log_errors=True,\n log_func=None):\n \"\"\"\n Run an experiment, if it hasn't been run already.\n We check if an experiment as already been run by looking for a log file, and whether that logfile has a success-line in it\n (we search for \"IML BENCH DONE\")\n :param self:\n :param cmd:\n :param to_file:\n :param env:\n :param replace:\n :param debug:\n :return:\n \"\"\"\n\n if log_func is None:\n log_func = logger.error\n\n if env is None:\n # Make sure rls-run get RLSCOPE_POSTGRES_HOST\n env = dict(os.environ)\n\n proc = None\n failed = False\n if replace or not expr_already_ran(to_file, debug=debug):\n\n try:\n tee_kwargs = dict()\n if skip_error:\n tee_kwargs['check'] = False\n proc = tee(\n cmd=cmd,\n to_file=to_file,\n cwd=cwd,\n env=env,\n dry_run=dry_run,\n tee_output=tee_output,\n tee_cmd=tee_cmd,\n tee_prefix=tee_prefix,\n only_show_env=only_show_env,\n **tee_kwargs,\n )\n if not dry_run and skip_error and proc.returncode != 0:\n if log_errors:\n log_func(\n \"Command failed; see {path}; continuing\".format(\n path=to_file,\n ))\n failed = True\n except subprocess.CalledProcessError as e:\n\n err_msg = textwrap.dedent(\"\"\"\\\n Command failed: see {path} for command and output.\n \"\"\").format(\n path=to_file,\n ).rstrip()\n if log_errors:\n logger.error(err_msg)\n if raise_exception:\n if exception_class is None:\n raise\n raise exception_class(err_msg)\n ret = 1\n if debug:\n logger.error(\"Exiting with ret={ret}\\n{stack}\".format(\n ret=ret,\n stack=get_stacktrace(),\n ))\n sys.exit(ret)\n\n if not failed:\n if not dry_run and proc.returncode != 0:\n logger.error(\"BUG: saw returncode = {ret}, expected 0\".format(\n ret=proc.returncode))\n assert proc.returncode == 0\n if not dry_run:\n with open(to_file, 'a') as f:\n f.write(\"{success_line}\\n\".format(success_line=EXPERIMENT_SUCCESS_LINE))\n if not dry_run:\n assert expr_already_ran(to_file, debug=debug)\n\n return proc\n\ndef expr_already_ran(to_file, debug=False):\n if not _e(to_file):\n return False\n with open(to_file) as f:\n for lineno, line in enumerate(f, start=1):\n line = line.rstrip()\n if re.search(r'{success_line}'.format(success_line=EXPERIMENT_SUCCESS_LINE), line):\n if debug:\n logger.info(\"Saw \\\"{success_line}\\\" in {path} @ line {lineno}; skipping.\".format(\n success_line=EXPERIMENT_SUCCESS_LINE,\n lineno=lineno,\n path=to_file))\n return True\n return False\n\nclass TestTee:\n\n def test_tee_01(self):\n tmp_fd, tmp_path = tempfile.mkstemp(prefix=\"test_tee_\")\n tmp_f = os.fdopen(tmp_fd, \"w\")\n tmp_f.close()\n try:\n with open(tmp_path, 'w') as f:\n # p = tee(['ls', '-l'], to_file=f)\n # p = tee(['bash', '-c', 'while true; do sleep 1; date; done'], to_file=f)\n p = tee(['bash', '-c', 'for i in `seq 1 1000`; do echo $i; done; echo \"DONE TEST\";'], to_file=f)\n with open(f.name) as readf:\n lines = readf.readlines()\n pprint.pprint({'lines':lines})\n has_done = any(re.search(r'DONE TEST', line) for line in lines)\n assert has_done\n finally:\n if _e(tmp_path):\n os.remove(tmp_path)\n\n def test_tee_02(self):\n for i in range(100):\n self.test_tee_01()\n","repo_name":"UofT-EcoSystem/rlscope","sub_path":"rlscope/experiment/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":11200,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"81"} +{"seq_id":"27615017548","text":"from fastapi import HTTPException, status\nfrom 
database.models.user import Building\n\n\ndef get_building(id: str):\n try:\n building = Building.objects(id=id).first().to_mongo().to_dict()\n return building\n except:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Invalid id\")\n\n\n\ndef get_buildings_by_village_id(id: str):\n try:\n return [building.to_mongo().to_dict() for building in Building.objects(village_id=id)]\n except:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Invalid village id\")\n","repo_name":"R3minisce/Farmvillage","sub_path":"Application/FastAPI_Template/database/controllers/buildings.py","file_name":"buildings.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26046925164","text":"#experimenting putting innerswitch\n#### testing nested switches looking to do count of switches\n#looking out how the code written by the user will look like\n# indentation is key \ntennis ='''\n\t\n\tswitch(exp) {\n\t\tcase 'cholpolty': \n\t\t\tprint(\\\"burrito!\\\")\n\t\t\tprint(\"bowl\")\n\t\t\tprint(\"lemonade\")\n\t\t\tbreak\n\t\n\t\tcase 'panda express':\n\t\t\tprint('good food')\n\t\t\t##===============================\n\t\t\t##===== inner s w i t c h ======\n\t\t\tswitch(exp) {\n\t\t\t\tcase 'cholpolty': \n\t\t\t\t\tprint(\\\"burrito!\\\")\n\t\t\t\t\tprint(\"bowl\")\n\t\t\t\t\tprint(\"lemonade\")\n\t\t\t\t\tbreak\n\t\n\t\t\n\t\t\t\tcase 'panda express':\n\t\t\t\t\tprint('good food')\n\t\t\t\t\tprint(\"in hollister \")\n\t\t\t\t\tadder(1)\n\t\t\t\t\tbuildstring('soon it')\n\t\t\t\t\tprint('BARK')\n\t\t\t\n\t\t\t\tdefault:\n\t\t\t\t\tprint('no results')\n\t\t\t\t\tprint(\"that is all\")\n\t\t\t\t\tbreak\n\t\t\tendswitch\n\t\t\t##===============================\n\t\t\t##===============================\n\t\tcase 'starbucks':\n\t\t\tprint('where is my mocha')\n\t\t\tprint(\"and my sausage sandwich...\")\n\t\t\tadder(2) #this won't do anything until it is executed\n\t\t\tbuildstring(' will work')\n\t\t\t\n\t\t\t\n\t\t\n\t\tcase 'starbucks':\n\t\t\tprint('where is my mocha')\n\t\t\tprint(\"and my sausage sandwich...\")\n\t\t\tadder(2) #this won't do anything until it is executed\n\t\t\tbuildstring(' will work')\n\t\t\t\n\t\t\n\t\tcase 'big testing':\n\t\t\tprint('where is my mocha')\n\t\t\tprint(\"and my sausage sandwich...\")\n\t\t\tadder(3)\n\t\t\tbuildstring(' and I will celebrate')\n\t\t\tbreak\n\t\t\t\t\n\t\tdefault:\n\t\t\tprint('no results')\n\t\t\tprint(\"that is all\")\n\t\t\tbreak\n}\n'''\nprint(\"==========\")\nhow_many_switches=tennis.count(\"switch\")\nhow_many_end_switches=tennis.count(\"endswitch\")\nanswer = how_many_switches- how_many_end_switches\nprint(\"answer of switch count=\",answer)\n\nprint(\"the number of switches found in input string=\",answer)\nprint(\"therefore it's answer ONE inner switch\")\nprint(\"==========\")\n\n'''\ndef inner_switch():\n casetest1 = ['test5','test6']\n \n switch(x) #<<====== switch() method is here\n\n while True: #<==== infinite loop used for fall thru method\n if case == \"test1\":\n print(\"yes it's one\")\n break\n\n elif case == \"test2\":\n print (\"switch case behavior works in Python now!\")\n fallthru(\"rudolph\") #<<===== fallthru() method is here *don't use* break with fallthru()\n #<<===== currently it requires the next case match in quotes \n elif case == \"test3\": #<<===== but later I will make it work using just fallthru()\n print (\"go reindeer\")\n break\n\n elif case == \"test4\":\n print (\"testing wow works on 
Sublime now Coooool...\")\n break\n\n elif case in casetest1: #['google', 'fishfood', 'probability']: #<<==== several cases in list on one line\n print (\"successful test in casetest1\") #<<==== I just put case here to show which word matched\n break\n\n elif case == \"test7\":\n print (\"gui design\")\n break\n\n #default:\n else:\n print('none')\n break \n'''\n###############\n#################################\n## S W I T C H C A S E ##\n#################################\n\n###this is new testing double level inner switch nested\n#######################\n### inner switch_3\n#######################\n### this is inner_switch()\ndef inner_switch_3(n): # is the test #test7 should be input\n print(\"=======inner_switch called==3==\",n)\n casetest1 = ['test5','test6']\n #this is switch inside of inner_switch\n inswitch(n) #<<====== inswitch() method is here\n\n while True: #<==== infinite loop used for fall thru method\n if case == \"test1\":\n print(\"dam did it work?\")\n print(\"yes it's test == one\")\n tahoe[0]=\"victory\" #puts victory into tahoe[0]\n infallthru('test2')\n\n elif case == \"test2\":\n print(\"this is inside of inners witch test2\")\n #inner_switch_2('coffee')\n print (\"switch case behavior works in Python now!\")\n break #<<===== fallthru() method is here *don't use* break with fallthru()\n #<<===== currently it requires the next case match in quotes \n elif case == \"mars\": #<<===== but later I will make it work using just fallthru()\n print (\"flying toy helicopter\")\n break\n\n elif case == \"test4\":\n print (\"testing wow works on Sublime now Coooool...\")\n tahoe[0]=\"sublime\" #puts victory into tahoe[0]\n break\n\n elif case in casetest1: #['google', 'fishfood', 'probability']: #<<==== several cases in list on one line\n print (\"successful test in casetest1\") #<<==== I just put case here to show which word matched\n break\n\n elif case == \"test7\":\n print (\"CAFE BORRONE here\")\n #######################\n inner_switch_3('mars')\n #######################\n break\n\n #default:\n else:\n print('None')\n break \n#####============================\n\n\n\n\n\n###this is new testing double level inner switch nested\n\n#######################\n### inner switch_2\n#######################\n### this is inner_switch()\ndef inner_switch_2(n): # is the test #test7 should be input\n print(\"=======inner_switch called===2=\",n)\n casetest1 = ['test5','test6']\n #this is switch inside of inner_switch\n inswitch(n) #<<====== inswitch() method is here\n\n while True: #<==== infinite loop used for fall thru method\n if case == \"test1\":\n print(\"dam did it work?\")\n print(\"yes it's test == one\")\n tahoe[0]=\"victory\" #puts victory into tahoe[0]\n infallthru('test2')\n\n elif case == \"test2\":\n print(\"this is inside of inners witch test2\")\n #inner_switch_2('coffee')\n print (\"switch case behavior works in Python now!\")\n break #<<===== fallthru() method is here *don't use* break with fallthru()\n #<<===== currently it requires the next case match in quotes \n elif case == \"test3\": #<<===== but later I will make it work using just fallthru()\n print (\"go reindeer\")\n break\n\n elif case == \"test4\":\n print (\"testing wow works on Sublime now Coooool...\")\n tahoe[0]=\"sublime\" #puts victory into tahoe[0]\n break\n\n elif case in casetest1: #['google', 'fishfood', 'probability']: #<<==== several cases in list on one line\n print (\"successful test in casetest1\") #<<==== I just put case here to show which word matched\n break\n\n elif case == \"test7\":\n 
print (\"CAFE BORRONE here\")\n #######################\n inner_switch_3('mars')\n ########################\n break\n #default:\n else:\n print('None')\n break \n\n\n\n\n#################################\n# value you want to run thru switch case\n\nglobal x\nx = \"one\" #it was \"one\" #<<=== x must be a string just as matching case == \"string\", \n #<<=== if using a number it will be converted to a string\n #<<=== so x = 22 will work and be converted to \"22\"\ntahoe=[]\ntahoe.append(0)\n# ======= main ===================================\ndef real_main():\n testfunction(x) #\"one\" is tested\n\nvictory=[]\nvictory.append(0)\n\n#######################\n### inner switch_1\n#######################\n### this is inner_switch()\ndef inner_switch_1(n): #test2 is the test\n print(\"=======inner_switch called==1==\",n)\n casetest1 = ['test5','test6']\n #this is switch inside of inner_switch\n inswitch(n) #<<====== inswitch() method is here\n\n while True: #<==== infinite loop used for fall thru method\n if case == \"test1\":\n print(\"dam did it work?\")\n print(\"yes it's test == one\")\n tahoe[0]=\"victory\" #puts victory into tahoe[0]\n infallthru('test2')\n\n elif case == \"test2\":\n print(\"this is inside of inners witch test2\")\n \n print (\"switch case behavior works in Python now!\")\n break #<<===== fallthru() method is here *don't use* break with fallthru()\n #<<===== currently it requires the next case match in quotes \n elif case == \"test3\": #<<===== but later I will make it work using just fallthru()\n print (\"go reindeer\")\n break\n\n elif case == \"test4\":\n print (\"testing wow works on Sublime now Coooool...\")\n tahoe[0]=\"sublime\" #puts victory into tahoe[0]\n #######################\n inner_switch_2('test7')\n #######################\n break\n\n elif case in casetest1: #['google', 'fishfood', 'probability']: #<<==== several cases in list on one line\n print (\"successful test in casetest1\") #<<==== I just put case here to show which word matched\n break\n\n elif case == \"test7\":\n print (\"gui design\")\n break\n\n #default:\n else:\n print('None')\n break \n\n\n# ======= switch =================================\ndef switch(x):\n if type(x) != str: #checks to make sure it's a string if for example a number is passed as x\n x = str(x)\n global case\n case = x.lower() \n \n# ======= fallthru =========================\ndef fallthru(y):\n eval(\"switch('\" + y + \"')\")\n \n \n#==================\n#for inswitch\ndef inswitch(n):\n if type(x) != str: #checks to make sure it's a string if for example a number is passed as x\n n = str(n)\n global case\n case = n.lower() \n\n#=====================\n# for infallthru \ndef infallthru(n):\n eval(\"inswitch('\" + n + \"')\")\n\n\n\n\n\ncasetest1 =['google', 'fishfood', 'probability']\n\n#===== SWITCH CASE CODE demonstration is inside function testfunction(x) below =========\n# this is a function with a switch case\n# which has a nested switch case within it inside of a function called inner_switch_1\n# ======= testfunction ========================\ndef testfunction(x):\n print(\"=== testfunction called with x====TESTING THIS JUNE 9th======= \",x)\n print('test function testing switch case behavior')\n#this is the switch called \n# ======== switch case code =======================\n switch(x) #<<====== switch() method is here\n while True: #<==== infinite loop used for fall thru method\n if case == \"one\":\n print(\"yes it's one\")\n ######################\n inner_switch_1('test4') #force feeding it for testing switch case 
function actually \n ######################\n print(\"output from innerswitch below\")\n print(\"tahoe[0]=\",tahoe[0]) #result of innerswitch running\n #what output is there from inner_switch?? use a list to capture it\n fallthru('word')\n\n elif case == \"word\":\n print (\"this is back in main switch now !\")\n fallthru(\"rudolph\") #<<===== fallthru() method is here *don't use* break with fallthru()\n #<<===== currently it requires the next case match in quotes \n elif case == \"rudolph\": #<<===== but later I will make it work using just fallthru()\n print (\"go reindeer\")\n break\n\n elif case == \"phrase\":\n print (\"testing wow works on Sublime now Coooool...\")\n break\n\n elif case in casetest1: #['google', 'fishfood', 'probability']: #<<==== several cases in list on one line\n print (\"successful test in casetest1\") #<<==== I just put case here to show which word matched\n break\n\n elif case == \"22\":\n print (\"gui design\")\n break\n\n #default:\n else:\n print('none')\n break \n#end loop\n#end switch\n\n\n# ==== end of switch case ======================\n# end testfunction\n\nif __name__ == \"__main__\":\n real_main()\n \n #should be called when it loads page so call real_main() \n \n \n ###########\n#real goto code\n #import web_pdb #; web_pdb.set_trace()\n#c.execute(\"INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)\")\nimport sys\n#import my_methods\nimport math\n\n'''\nfrom curses.ascii import CAN\nfrom asyncore import loop\nfrom setuptools.dist import sequence\nfrom pip._vendor.pyparsing import WordStart, line\nfrom test.test_getargs2 import PositionalOnlyAndKeywords_TestCase\nfrom email._header_value_parser import Phrase\nimport string\nfrom pkg_resources.extern import names\nfrom tkinter.constants import TOP\nfrom lib2to3.fixer_util import Number\nfrom array import array\nfrom cProfile import label\n'''\n#from curses.has_key import python\n#from builtins import False\n#from ctypes.wintypes import WORD\n#conn.commit()\n#conn.close()\n\n\n'''\n\n#switchcasesample = \n'''\n# switch(grade) { //notices is has the brace here starting the block\n# case 'A' :\n# get startswith(\"case\") //that should work fine testing each line\n# printf(\"Excellent!\\n\" );\n# break;\n#\n# case 'B' :\n# case 'C' :\n# printf(\"Well done\\n\" );\n# break;\n# case 'D' :\n# printf(\"You passed\\n\" );\n# break;\n# case 'F' :\n# printf(\"Better try again\\n\" );\n# break;\n# default :\n# printf(\"Invalid grade\\n\" );\n# }\n'''\n\n\n \ndef gothedistance():\n print(switchcasesample)\n print(\"%% go the distance called\")\n listnew= switchcasesample.split()\n\n print(listnew)\n for item in listnew:\n if item == \"case\":\n print(\"yes\")\n\n#gothedistance();\n\n\n\n\n\n\n\n# \n#what if I append each line with \\n\n\ndef testlines(x): #how to determine number of lines in a string\n print(\"testlines comparing cases and breaks parser beginnings......\")\n counter = 0\n secondcounter = 0\n print(x)\n #for item in stringtest:\n newlist=stringtest.split()\n print(newlist)\n #not using breaks just continue no else\n for item in newlist: # will become: loop thru newlist\n if item == \"case\": #this was teh bug the semilocon at end of word break;\n counter +=1\n continue\n #end if\n print(\"number of cases in string =\", counter)\n print();\n\n for item in newlist:\n if item == \"break;\": #this was teh bug the semilocon at end of word break;\n print(newlist[secondcounter]) #get location of item in list\n #print(\"location of word break\",sweet)\n secondcounter +=1\n continue\n #end if\n 
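`testlines()` above counts `case` and `break;` tokens with manual loops, and the nesting check at the top of this record used `str.count('switch')`, which also matches the substring inside every `endswitch`. Counting whitespace-split tokens with `collections.Counter` sidesteps both problems; a sketch:

```python
from collections import Counter

def count_tokens(src):
    counts = Counter(src.split())             # whole tokens, not substrings
    return counts["case"], counts["break;"]

cases, breaks = count_tokens("case 'A' : break; case 'B' : break;")
print(cases, breaks)                          # 2 2
```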
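These switch experiments emulate C's switch with a `while True:` loop, an `if`/`elif` chain, and a global `case` that `fallthru()` mutates. Since Python 3.10, the match statement covers most of this natively, including grouping several cases on one arm; a sketch of the equivalent:

```python
def classify(case):
    match case.lower():
        case "one":
            return "yes it's one"
        case "five" | "six" | "seven":        # several cases, one arm
            return "coding"
        case _:                               # the 'default' arm
            return "none"

print(classify("Six"))                        # coding
```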
print(\"number of breaks in string =\", secondcounter)\n print();\n\n if counter == secondcounter:\n print(\"cases and breaks correspond together\")\n else:\n print('cases and breaks not same number')\n #still need to check that a break precedes a case\n print()\n print(\"this is bullshit a tab is not the same as 4 spaces\")\n print()\n # what I see is that th ehuman knowledge stays in my brain and\n # it needs to be in a knoweldgebase that I can search like my plum searcher\n\n\ntestlines(stringtest) #this calls it\n\n\n#now test a return inside of a while true\n#actaully break jumps out of a while true\n# and return exits a function\n\n\n##=============== testing theory here ==========\nlisttest =[]\n\n\n#testing using return to bail from testing_switch function\n\ndef testing_switch(z): #simulating a switch\n print(\"TESTING SWITCH here ...\")\n print(\"testing_switch called with 1 for z\")\n while True:\n if z == \"1\":\n print(\"it is 1\")\n listtest.append(\"goto Chilis\")\n print(listtest)\n return; #this exists the testing_switch function\n elif z == \"2\":\n print(\"it is 2\")\n break;\n elif z == \"3\":\n print(\"it is 3\")\n break;\n else:\n print(\"none of the above\")\n break;\n #default\n #end while true\n\n\n\n\n\n\ndef catcher_follows():\n #testing theory this should create a flag for yes\n #or rather determine if the flag is true for goto\n #and snag the label destion such as chilis.\n flag = False;\n print(\"catcher_follows called\")\n if listtest[0] != None:\n print(\"there is something here\")\n stringtest = listtest[0];\n if stringtest.startswith(\"goto\"):\n print(\"yes it starts with goto\")\n print(listtest[0])\n print(\"where to goto\",listtest[1])\n flag = True;\n else:\n print(\"nope it doesn't start with goto\")\n print(\"THIS SHOULD NOT Be called\")\n\n else: #means empty if reaches here\n print(\"there is nothing in it. 
\")\n #End of if not = None\n\n #this is now triggered if flag is true for goto\n if flag == True:\n print('the flag is true')\n #then I would dynamically put together goto label;\n #then I would call it - so it's called after switch\n goto(listtest[1]);\n else:\n print(\"the flag is false\")\n\n\n\nprint(\"THIS IS WHERE THE TESTING ALPHA BEGINS\")\ntesting_switch(\"1\") #represnting switch case here\n#so i drop out of the switch case and then test\n#with catcher_follows to see if a goto was called and if so\n#then trigger it with eval build it and then exec() to call it\n\ncatcher_follows() #this is calls flowing down after switch\n\n\n\n\ndef gofish(x):\n return int(math.sqrt(x))\n\na =gofish(100)\nprint(a)\n\n# testing my parser turning a string into an ordered\n# list so I can test juxtraposition\n\nparsed_string = []\n\nstring_input = \"what is this for phrase in valentines\"\nfun = string_input.split(\" \") #separates by space\nprint(\"the new list = \",fun)\n\nprint(\"\");\n#see if I can teach it the word first\n#which in this context will have to be a function\n#testing returning the first word\ndef first(): #this is a function to grab the first word\n #print(fun[0]) # in the list that was just created; cool this lets me see what it sees\n return fun[0];\n\n#we know it's a word so I don't need to say first_word\ndef last():\n #print (fun[-1])\n return fun[-1];\n\nif first() == \"what\":\n print(\"what is the first word\")\nelse:\n print(\"first word is not what\")\n#end if\n\n #list funcgions\n #first, rest, last\n #get last word in list\nprint(\"this should return the first word in the string\")\nprint(first())\nprint(\"this should return the last word in the string\")\nprint(last())\n\n\nprint(\"we are here now ...............\")\n\n\n\n\n\n\n\n\n'''\n\n#################################\n## S W I T C H C A S E ##\n#################################\n\n# value you want to run thru switch case\n\n#global x\n#x = \"strawberries\" \n #<<=== x must be a string just as matching case == \"string\",\n #<<=== if using a number it will be converted to a string\n #<<=== so x = 22 will work and be converted to \"22\"\n# ======= main ===================================\ndef main():\n snowflakes('strawberries')\n\n#casesList = [\"one\", \"word\", \"rudolph\", \"snow\", \"google\",\"fishfood\", \"probability\", \"22\"]\n#\n#switch and fallthru will need to be imported methods\n\n# ======= switch =================================\ndef switch(x):\n print('\\n');\n if type(x) != str: #checks to make sure it's a string if for example a number is passed as x\n x = str(x)\n global case\n case = x.lower()\n # print(\"inside entry point rabbit hole\")\n #print(\"of switch case and case = \",x)\n\n# ======= fallthru =========================\ndef fallthru(y):\n eval(\"switch('\" + y + \"')\")\n\n ############ these are new haven't tested them yet\n ##################################################\ndef jumpgoto(x): #needs to be for label\n eval(\"mylistgoto.append(\"' + x +'\")\");\n\ndef getfromlist():\n print(mylistgoto[0])\n return mylistgoto[0];\n#===== SWITCH CASE CODE demonstration is inside function testfunction(x) below =========\nmylistgoto = [];\n\n\n\n\n\n\n\n\n\n##################################################\n###########################################\n\n# ======= testfunction ========================\ndef snowflakes(x):\n print('test function testing switch case behavior')\n\n# ======== switch case code =======================\n print(\"testing SWITCH CASE upper normal code 
test:::\");\n\n switch(x) #<<====== switch() method is here\n while True: #<==== infinite loop used for fall thru method\n\n if case == \"one\": #as method case(\"one\"): an inner function\n print(\"yes it's one\")\n break\n\n elif case == \"word\":\n print (\"switch case behavior works in Python now!\")\n fallthru(\"rudolph\") #<<===== fallthru() method is here *don't use* break with fallthru()\n #jumpgoto(\"rudolph\")\n #<<===== currently it requires the next case match in quotes\n elif case == \"rudolph\": #<<===== but later I will make it work using just fallthru()\n print (\"go reindeer\")\n\n break\n\n elif case == \"snow\": #testing this one\n print(\"matching snow\");\n print (\"will it work\");\n break\n #this is totally cool because case can be any item in the list\n elif case in ['google', 'fishfood', 'probability']: #<<==== several cases in list on one line\n print (\"coding\")\n break\n\n elif case == \"22\":\n print (\"gui design\")\n break\n\n #default\n\n else:\n #elif case not in casesList: #if the case word or phrase is not in list cases\n print(\"this is the normal code test not the lower one\")\n print(\" this is the NORMAL CODE sorry no matches for\", case)\n break;\n #the while loop effectively ends the block of code\n #this is done by one line of white space and indentation\n\n#end loop\n#end switch\n#snowflakes('snow');# testing this out\n\n#####====================================================\n\n\n\n#===== SWITCH CASE CODE demonstration is inside function testfunction(x) below =========\n\n\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef thursday_fun():\n print(\"fun day in the rain today its raining\")\n\nthursday_fun();\n\n#this is code not in a function\n#this stirng is not in a function\n#so the indentation was from the left margine\n\n\nstring1 = \"\"\"\nprint(\"=== what we have here is a failure of communicate===\")\\n\nprint(\"I am inside of string1\")\\n\ntest = \"one\"\\n\n#spacer\nif (test == \"one\"):\\n\n print(\"easy now\")\\n\n print(\"this test is working!!\")\\n\nelse:\\n\n print(\"not working darn\")\\n\n\"\"\"\n\nexec(string1); # what this does is run the string code in\n # the string above.\n\nso_close = \"\"\"\n case 'Oranges' #first word and second word or phrase in quotes\n \"\"\"\n\n #stick these in a list first and second so 0 and 1 position\n#smart good thinking Southwood\n\ndef new_case(the_name):\n caseline = \"if case == \" + \"'\" + the_name + \"'\" + ':'\n print(caseline)\nprint(\"\\n\")\nprint(\"testing printing a line of a case condition\\n\")\nnew_case(\"Oranges\")\n\n\n\n #how do I put a prefix in front of a string\n #then put \" == \" after case\n # elif case == \"word\":\n\n #I can use the input from a list with the case and word or phrase\n\"\"\"\n and then use another list that I add to the front\n and then fill in the paramaters and append it\n in python\n insert() to the front of a list\n listename = []\n listename.append(whatever)\n \"\"\"\n\n #def myreplace(string,,b): #name of function bug spelling counts\n # anewstring = string.replace(a,b)\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef replace_casecode(): #name of function bug spelling counts\n print();\n print('test REPLACE A STRING replace default with else:')\n mystring = \"funny coffee money good default\"\n # Prints the string by replacing geeks by Geeks\n print(\"original =\",mystring)\n weasel = mystring.replace(a,b)\n print(\" weasel =\", weasel)\n print();\n\n##\ncase = \"two\"\nx = case\n\n#===================================\n### HERE I AM EXECUTING THE SOON TO BE GENERATED 
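DYNAMIC PYTHON CODE\n# a hedged, self-contained sketch of that generate-then-exec idea (the names\n# and demo values here are illustrative, not from the original):\ndef sketch_generated_switch(value, cases):\n    lines = [\"matched = 'no match'\"]\n    for i, (word, result) in enumerate(cases):\n        keyword = \"if\" if i == 0 else \"elif\"\n        lines.append(\"%s %r == %r:\" % (keyword, value, word))\n        lines.append(\"    matched = %r\" % result)\n    namespace = {}\n    exec(\"\\n\".join(lines), namespace)  # run the generated chain, like trynow() below\n    return namespace[\"matched\"]\n\nprint(sketch_generated_switch(\"two\", [(\"one\", \"first\"), (\"two\", \"second\")]))  # -> second\n# 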
DYNAMIC PYTHON CODE\n## I STILL NEED TO ADD THE \\t front of a line AND /n appended to the end of line\n#so I will be using fuzzy logic and flags\n# thinking about this more seriously Monday April 27th, 2020\n# I hear a train in the distance whistling it's loud horn\n#def substracttopdefinelines(stringer):\n #loop thru string\n #split() string at end of defines\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef trynow(): #this is not being called == funny\n #this is after the macro expansion\n #this is after the indentation is added\n go_string = \"\"\"\nprint(\"::::::::::::::::\")\\n\nswitch(x) \\n\nprint(\"case =\",case)\\n\nprint(\"x =\", x)\\n\n\nprint(\"I am in this switch case test now\")\\n\nwhile True:\\n\n\\tif case == \"one\": #as method case(\"one\"): an inner function\\n\n\\t\\tprint(\"yes it's one\")\\n\n\\t\\tprint(\"jumpint out of switch case generated here for one ==\")\\n\n\\t\\tbreak\\n\n\n\\telif case == \"two\":\\n\n\\t\\tprint (\"switch case behavior works in Python now!\")\\n\n\\t\\tprint(\"we have two chosen!!!\")\\n\n\\t\\tprint(\"It should have chosen two from case above the string\")\\n\n\\t\\tfallthru(\"three\")\\n\n\n\\telif case == \"three\":\\n\n\\t\\tprint (\"go three\")\\n\n\\t\\tbreak\\n\n\n\\telif case == \"four\": \\n\n\\t\\tprint(\"matching snow\")\\n\n\\t\\tprint (\"will it work\")\\n\n\\t\\tbreak\\n\n#testing this (I added this and forgot)\n\\telif case in ['five', 'six', 'seven']:\\n\n\\t\\tprint (\"coding\") \\n\n\\t\\tbreak\\n\n\n\\telif case == \"eight\":\\n\n\\t\\tprint (\"the word eight was matched - good job\")\\n\n\\t\\tbreak\\n\n\\t#default\\n\n\\telse:\\n\n\\t\\tprint(\"this is the generated else down below *** inside of test for two down below\", case) \\n\n\\t\\tprint(\"this is the SHOULD BE BAD NEWS else inside of test for two down below\", case) \\n\n\\t\\tprint(\"sorry no matches for\", case) \\n\n\\t\\tprint(\"......the end\")\\n\n\\t\\tbreak\\n\n\"\"\"\n #newstring = macros(go_string)\n #go_string = newstring;\n #needs to modify the go_string\n #do macros first\n # #define fudge with 'two\":'\n #it can't execute it until the code is accurate\n\n print(go_string); #this shows the (sooon to be ) generated python source\n # second_gostring =substracttopdefinelines(go_string)\n exec(go_string); #this runs the code executed and interpreted\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsmartlist=[]\n\ndef trucks():\n print(\"testing trucks,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\")\n smartlist.append('buttefly')\n smartlist.append(\"bird\")\n print(\"length of trucks list\", len(smartlist))\n print(smartlist)\n print()\n\ntrucks();\n\n\n\n###3==========================================================================\n# testing goto here now for labels\n#//============================================================================\nlistoflabelcallsinorder =[]\n\ndef switchy(x):\n print(\"SWITCHY CALLED called with\",x)\n listoflabelcallsinorder.append(x)#this adds label switched to to list\n print(\"the length of calls = \",len(listoflabelcallsinorder))\n print('\\n');\n if type(x) != str: #checks to make sure it's a string if for example a number is passed as x\n x = str(x)\n \n global label\n\n label = x.lower()\n print(\"label=\",label)\n\n\n # print(\"inside entry point rabbit hole\")\n #print(\"of switch case and case = \",x)\n #gotoflag = False; #default\n\n\n# ======= jumpto =========================\ndef jumpto(y):\n print('JUMPTO called',y)\n switchy(y)\n \n\ndef firstlabel():\n print(\"...burp....\");\n\n\ndef secondlabel():\n print(\"..2a ........0second label 
called\");\n print(\"testing this out some more\")\n\nmemory_list = []; #global by being outside of methods\n\n#this has to be after the body of switch case and goto triggered true\n#and chosen labeland break from whiletrue\n\n#==========================================\n# feed_goto()\n#==========================================\ndef feed_goto(val): #if this is called switch already started need a bypass to hop directly to destination/catch dropopin\n print(\"feed_goto method called\", val) \n memory_list.append(val);\n memory_list.append('true'); #I need to put the flag value in list too\n print(\"feed_goto=memory_list\",memory_list)\n return;\n\n\n\n#==========================================\n# catch_dropping_goto()\n#==========================================\ndef catch_dropping_goto():\n print(\"CATCH DROPPING GOTO method called=====\")\n print(\"this is catch dropping goto method\")\n print(\"memory_list=\",memory_list)\n if memory_list[1] == \"true\": #I need to put the flag value in list too\n # get value of label\n #c = get_destination_memory() #where to goto\n gohere = \"'\" + memory_list[0] + \"'\" #this holds labelname*\n\n dothis = eval('jumpto(gohere)')\n print(dothis)\n exec(str(dothis)) #the question is from outside switch\n #this now officially triggers the goto\n # we are now in the domain of the label switch\n #will the jumpto method work?\n else:\n print(\"flag = false\")\n return; #meaning do nothing\n\n#end of catch_dropping_goto()\n\n\n#important design specs I dreamed up late last night\n# Monday June 22, 2020\n'''\n#goto thinking\n#switch case body for structure stripes\n#start at top of flow down encounter a gotoand normally jump down to labelseveral gotos many paths\n#feed goto where it says gotoafter end if switch and while tru loop \n#catch falling gotos method\n#\n#but if goto triggerd don't begn at startso 2nd specialswitch for goto redirects \n#that ouls solve problem\n#and when elaving freed goto set to falsefor readaibliyt call it exit-goto\n#\n#to pass thru catch falling gotos beneath switch case block \n#will be set to false and if false pass through catch_falling_gotos\n#\n#the switch case behavior and appearnce \n#in C looks very different from C gotosC macrosand Lisp macros too\n#I ahve the switch case figured out and good progress on the gotos working\n#\n#switch case fallthru() if no break\n#mulitiplel cases figured out\n#'''\n\n\n\n\n\n\n\n\n#I think that what this is if jumping out of inner switch case\n#I put the destination into memory in a list \n#and run a function after inner switch case if true then jumpto destination memory list\n#and if false do nothing, a simple filter and that way I can jump out of an inner switch\n#case which is a trick, an illusion.\n\n\n#putinto memory\n#then catch it\n############################ this is funnel after switch case\n######### what this does is dynamically recreate goto\n######### if goto flag = true and calls it as if it was\n######### triggered within the body of the switch case itself\n #catch_dropping_goto()\n #if goto flag == True: #I need to put the flag value in list too\n # get value of label\n # dothis =eval('jumpto(' + value + ')')\n # exec(dothis)\n #else:\n # return: #meaing do nothing\n\n'''\n#for ( ... 
) { ...\n# if (disaster)\n# goto error;\n#} ...\n#error:\n# /* clean up the mess */\n\n\n#for (i = 0; i < n; i++) for (j = 0; j < m; j++)\n# if (a[i] == b[j])\n# goto found;\n#/* didn't find any common element */\n#found:\n#/* got one: a[i] == b[j] */\n#'''\n\n\n\n#turned off tahoe_goto_test to eliminate this being the bug\n\n#tahoe_goto_test(\"test_inner_switch\"); #here we go - hang on\n# this should call second and then first and then break out of infinite loop\n\ndef showpathitdid():\n return\n print(\"#####==SHOW PATH IT DID CALLED\")\n length = len(listoflabelcallsinorder)\n print(\"number of labels jumpedto =\",length)\n #print(\"path it took =\", listoflabelcallsinorder)\n #print(listoflabelcallsinorder[0])\n thelength = len(listoflabelcallsinorder)\n for name in range(thelength):\n print(listoflabelcallsinorder[name])\n print(\"done printing dam list\")\n\ndef make_it_so():\n print('testing if watch_cnn_now actually called or not')\n\n\n#watch cnn now this is practicing with the real goto code.\n#==========================================\n# watch_cnn_now() I need to encase a switch case into a function for scope\n# and so that I can use a return to jump out instantly from anywhere\n# and I need to find my catchfallinggotos which is called immediately after\n# switch case (oh idea called by switch case if flag says goto called\n\n#==========================================\ndef watch_cnn_now(): #no argument for the goto labels to start\n return # so it does nothing junw 9th\n print(\"watch_CNN_now called .........***......testing this now \")\n make_it_so();\n x = \"starter\"\n #breakpoint() #testing this to see if it works\n print(x);\n print(\"inside of WATCH CNN NOW running\");\n print()\n print(\" --- watch_cnn_test() function ---\")\n print()\n ##this is for making goto structure for labels\n ## and to use gotos\n ###############################################\n\n # print(\"switchy(x) here inside of another function watch_cnn_now\")\n # print(\"I think that that is the bug I need a seperate switchy_name\")\n #print(\"or what I could do is have a second variable for uniqueness\")\n #rint(\"for a particular method that it's used in so it runs properly. \")\n print(\"x ====\", x)\n switchy(x) #this is really switch(x) #fake loop where switch normally would be #<<====== switch() method is here\n tester_counter = 0;\n while True: #Actual outter loop #<==== infinite loop used for fall thru method\n #infinite loop here for this to work\n tester_counter += 1;\n print(\"tester_counter=\",tester_counter)\n #g = input(\"type a letter : \")\n #print(g) #this acts as my breakpoint so I can see a var value\n #i can rename this brkpnt()\n #this is \"starter\" which is the top of the function since this way it starts top down\n\n if label == \"starter\": #as method case(\"one\"): an inner function\n print(\"STARTER label just reached...........\")\n print('starter worked... 
finally')\n # print(\"onea\") in the coprocessor mode there is no starter label\n #print it is nonetheless used as a primer\n print('onea label entered')\n #firstlabel();\n print(' 1a == jumped up tpo')\n print(\"this jumped to label onea above it\")\n print(\"counter thru loop =\", tester_counter)\n #it's a string\n apple = \"2\"\n if apple == '1':\n jumpto(\"starfish\") #this will say: goto blueberry;\n #print(\"this should not print anything\")\n else:\n jumpto(\"dolphin\") #this will say: goto waterfall;\n #print(\"this should not be printed either\")\n\n\n #jumpto(\"waterfall\"); #notice no break; afterwards\n\n \n elif label == \"waterfall\": # so if twoa is called it does it and jmps up to onea then breaks out\n print(\"WATERFALL label just reached...........\")\n print('at waterfall label now in switch case for labels')\n print(\"============\")\n # print (\"goto label twoa behavior works in Python now!\")\n #secondlabel(); #prints second label information\n print(\"$$ watefall \");\n list1 = [\"snowing\",\"wondering\", \"tennis\"]\n print(\"counter thru loop =\", tester_counter)\n\n print(\"list1[0] =\",list1[0]);\n if list1[0] == \"snowing\":\n print(\"true\")\n jumpto(\"threea\");#this is a goto label test\n else: #below was not called\n print(\"not jumping to threea failed\")\n print(\"counter thru loopy =\", tester_counter)\n jumpto(\"default\"); #testing\n #break; #or break out with default\n #end if\n\n\n #end for loop\n #break;\n #print('do stuff here in second label')\n #jumpto(\"onea\");\n #NORMAL SWITCH CASE CODE HERE SWITCH AND CASE AND ELSE: USED\n #don't use a break here\n #goto \"threea\" label #<<===== fallthru() method is here *don't use* break with fallthru()\n #can't use a break here\n #this is treated as a LABEL #<<===== currently it requires the next case match in quotes\n \n elif label == \"dolphin\":\n print(\"dolphin label just reached.................\")\n #put 'default' into destination list\n #jumpto(\"default\")\n feed_goto(\"default\") #puts data in list to access sets flag to true\n print(\"should leave switch case\")\n break; #to get out of switch case\n #print(\"this will not print right?\")\n #break;\n\n \n\n elif label == \"threea\": #<<===== but later I will make it work using just fallthru()\n print(\"THREEA label just started.............\");\n print (\"$$ ==3a go threea\")\n print(\"this jumped below it to threea\")\n # print(\"we have successfully jumped out of SWITCH CASE\")\n #jumpto(\"twoa\")\n print(\"counter thru loop =\", tester_counter)\n #break; #necessary to leave the while True: infinite loop\n print(\"about to start nested loop test jumping\")\n i = 1\n #counter = 0\n print(\"while loop and jumping out of it as a goto\")\n print(\"this is inside of threea\")\n print(\"doing loop here and jumping out ====\")\n while i < 5: #this is definitely a loop\n i += 1;\n print('i =', i)\n if i == 2:\n print(\"i = 2 now\")\n print(\"just before jumping to strawberry called with goto as jumpto\")\n jumpto(\"strawberry\");\n #print(\"now is the time to get the fly\")\n #don't put anything after the jumpto(word) because it's pointless \n #break;# to break out of this little while\n #if i == 3:\n # print(\"i = 3 now\")\n #jumpto(\"blueberry\");\n\n\n\n #goto blueberry;\n #why did this work.\n\n #this will be at the bottom anyways\n \n \n \n elif label == \"blueberry\":\n print(\"BLUEBERRY label just reached.................\")\n print(\"we are in blueberry from a loop \")\n x = 1\n while True:\n x += 1;\n if x == 3:\n jumpto(\"default\"); #and this 
is up above how interesting. \n #break;\n\n # print(\"just got visited by sheriff\")\n break; #to get out of the goto switch (this wasn't called so it does nothing)\n \n elif label == \"strawberry\":\n print(\"STRAWBERRY LABEL just reached..................\")\n print(\"strawberry here that I jumped to\")\n print(\"so I jumped out of a while loop with goto to label below it\")\n #jumpto('default');\n #break; #all impportant. I predict they will be back\n break;\n #what I learned is a break ends the whole goto assembly\n #and I can't have a break after jumpto(word) for some bizarre reason\n \n elif label == \"starfish\":\n print(\"STARFIX label just reached.................\")\n x=25\n y=0\n while y < x:\n y+= 1\n print(y)\n if y == 10:\n\n jumpto(\"default\")\n #thats the bug\n \n #print('do not print this')\n #break;\n\n\n \n\n\n elif label == \"default\": #<<===== this will be invisible not visible.\n \n print()\n print(\"DEFAULT label just reached...................\")\n #print(\"used goto to get here from three nested loops!!!!\")\n print(\"@@ welcome to default land @@\")\n print('successfully jumped to DEFAULT BABY')\n print(\"default triggered to leave the function but no default label in reality\")\n print(\"counter thru loop =\", tester_counter)\n print();\n \n break; #safety to end the while loop\n #there will be no unauthroized jumps\n\n else:\n print(\"else called the escape since a label was called........\")\n break; #maybe it needs this who knows\n #this is acting as just a jump table and there will be no default necessary\n #end of label switch case\n \n\n\n\n\n#watch_cnn_now();\n#print(\"burp test here\")\n#print(listoflabelcallsinorder)\n#showpathitdid()\n\n#scatch_dropping_goto()\n\n\n#Bshowpathitdid();\n\n\n\n\n\n\n\n\n\n\n'''\n#output should look like this from above\n\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#============================\ndef replace_a_string(a,b): #name of function bug spelling counts\n print(\"look at these two strings %%%%%%%%%%%%%\")\n print(\"testing replacing a string so work on macros expansion\")\n print();\n print('test REPLACE A STRING replace default with else:')\n mystring = \"funny coffee money good default\"\n # Prints the string by replacing geeks by Geeks\n print(\"original string=\",mystring)\n weasel = mystring.replace(a,b) #this is creating a new string\n print(\" weasel new string =\", weasel)\n print();\n\n#replace_a_string(\"default\", \"else:\")\n\n\n\n\n\n#working on appearance of C goto layout within a function\n#==========================================================\n\n#what it will look like but for loop and labels will need to be in triple string \"\"\"\n'''\n#somethinglist = ['testing1','testing2','testing3','testing4']\n#\n#for item in somethinglist:\n# if item == \"testing2\":\n# goto error; #C style without braces \n# else:\n# print(item)\n#\n#error:\n# print(\"error label reached\")\n'''\n\n\n#might need a second label to be directed to if not an error\n#thatwould go after initial label block\n\n#========================\n #in python it will look like this \n #need starter label at top to start at beginning of the function \n #and I will need to import methods switch(), jumpto(), feed_goto(), catch_dropping_goto(), switchy()\n'''\nif label == \"starter\": #so it starts at the top by default possibly if label == starter and first = true\n for item in somethinglist:\n if item == \"testing2\":\n print(\"about to goto error label\")\n jumpto(\"error\") #C style without braces \n else:\n 
print(item)\n'''\n'''\nif label == \"error\":\n print(\"error label reached\")\n\n'''\n\n\n\n# this will not be seen or interpreted by the\n# interpreter for python because it's not python code\n# it's javascript syntax code which it can't read\n# nor understand\n\n#I will add breakpoint\n#inner switch\n# show code gen()\n#to debug trace code path\n#macros also fiddle around with those and do while and swap\n#this will allow macros later on so the the first pass macros will be expanded\n\njs_switch_string = \"\"\" //you can comment C style insde of it\nvar expr = 'oranges'; //you can use var if you want\n\nswitch (expr) { //brace is optional /* this switch a function call that is imported */\n case 'oranges':\n print('Oranges are $0.59 a pound.'); //should return oranges\n break; //breaks don't required the semicolon but it's okay to use\n case 'apples':\n print('Apples are $1.55 a pound.');\n break;\n case 'mangoes':\n case 'papayas': //this works using in with Python\n print('Mangoes and papayas are $2.79 a pound.');\n # expected output: \"Mangoes and papayas are $2.79 a pound.\"\n break;\n default: //must have a default\n print('Sorry, we are out of ' + expr + '.');\n break; //optional break but recommended\n} //optional pure taste I like it with the brace actually.\n\"\"\"\n\n#print(js_switch_string) #this shows the C style switch case\n\n#this is just playing\n#a = 4\n#b = 3\n#\n#testing genrating some switch code\n#friday night 24th of april at garlic farm truck stop\ndef generate_pyswitch():\n var_one = '\"one\":'\n teststring = '''\n switch(x)\n\n while True:'''\n\n print(\"testing generating some simple switch code in python\")\n print(teststring)\n badass =\" if case ==\"\n # x = badass.ljust(4) # four spaces from left indentation\n #print(x)\n print(badass, var_one)\n print()\n\n#generate_pyswitch(); #testing small parts of it right now\n\npalomar = '''\nwords here\nmore words\ncase \"onions\":\nprint(' some words ');\nprint(' another day ');\nbreak;\n'''\n##########@@@@@@@@@@@@@@@@@@@@@@@ THURSDAY CODING ###############\n#CODING IN THE WOODS MAY 7TH IN THE SHADE OF REDWOODS\n#thinkint or mqking a new string and building top down\n# right now\n#how do I grab what is in quotes\n#at top look for switch(exp){\n#at bottom of switch case look for } or endswitch\n\n\n\n\n\n\n\n\n#============\ndef read_case_convert_to_if():\n print('read case convert to if')\n #get line( strg)\n #print(format.string.replacewhole line)\n #case 'oranges':\n #becomes \"if case == \" + word + \":\"\n#determine number of lines between case and break for a case;\n\n#determine if case on several lines before case not on a line\n#determine if a break and what line Number\n#determine if default word exists (it needs to exist)\n#thursdady, may 7th take input the case 'word':\n# and output if case == \"theword\":\n\n\n\n\ndef getword():\n print(\"called function getword()=======//===>>>\")\n #txt = \"case 'oranges:\"\n txt = \"case 'apples':\"\n x = txt.split(\"'\")\n print(x)\n print(x[1])\n fun = x[1];\n newfun = fun.rstrip(':')\n print(newfun)\n quote = '\"'\n victory = \"if case == \" + quote + newfun + quote + \":\"\n print(victory)\n\n#getword();\n\n###########3==========================================\n\ndef count_lines(thestring):\n cool = len(thestring.split('\\n'))\n txt = \"the length is {} lines \"\n print(txt.format(cool)) #this grabs text from above line\n #print(\"the string is\",toosmart)\n\n # print(f'{cool} in {str}')\n # print(\"there are {0} lines 
in\".format(count_lines(str),str))\n\nfirststring = \"palomar\" #also I can reference it in a list\nsecondstring = \"js_switch_string\"\n\n\n\n\ndef felix():\n print('testing Manipulating Strings')\n thenumber = count_lines(palomar)\n print(\"name of the string is\", firststring)\n print()\n thenumber = count_lines(js_switch_string)\n print(\"the name of the string is\",secondstring)\n print()\n\n\n\n #test for default\n #test for at least one case (count them)\n\n #break it down into case sections\n\n\n\n\n\n\n\n\n\n\n\n\n#count cases in the switch case string\ndef count_cases(stringname):\n print(\"count_cases function called\")\n string_to_search = \" case \"\n list_of_results = []\n print(\"count_cases called\")\n txt = stringname;\n #so I will have to use eval to dynamically insert it\n x = js_switch_string.count(\"case\") #count case\n print(\"there are\", x, \"cases in \", stringname)\n\n print('the god damned linenumber that case is on ')\n matched_lines=[line for line in js_switch_string.split('\\n') if \"case\" in line]\n print(matched_lines)\n linenumber = 0;\n for line in js_switch_string.split('\\n'):\n linenumber +=1;\n if \"case\" in line:\n print(\"linenumber is\", linenumber-1)\n #because the first line has nothing on it it's the quote\n\n\n #this regturns 3, , 6, 7 for line number of the actual cases that are each on only one line\n #and we know if the numbers are sequential\n #it means several cases for one condition\n #we know that the last line for a case is break\n #so we can get the line numbers\n #and we know that 5 is a break and 3 is case\n #so line 4 is the statement for first case starting on 3\n\n #now can I grab that line\n #so to grab line 4 it's really line 5 for it is +1\n #these are in the order of the cases top down\n #these are the lines after each case within a case body that have python code within them\n #I will need to create a variable to determine how many lines exist between cases\n #I will need to determine if cases arde sequential with no lines in between and how many cases\n\n avoid_list = [\"case\", \"switch\",\"break\",\"default\",\"const\", \"#\"]\n end_matched_lines=[line for line in js_switch_string.split('\\n') \\\n #if all(avoid_list) not in line ]\n if \"case\" not in line and \\\n \"break\" not in line and \\\n \"switch\" not in line and \\\n \"default\" not in line and \\\n \"const\" not in line and \\\n \"#\" not in line ]\n\n\n #I want this to say \"if all(avoid_list) not in line\n # if foreach(item,avoid_list) not in line:\n\n\n print(end_matched_lines) # these returns the list of text\n\n #here I put the (fed) the lines for each case statements\n #into a list to access and reuse since they are in order\n\n output_list =[]\n for x in range(len(end_matched_lines)):\n output_list.append(end_matched_lines[x])\n print(end_matched_lines[x])\n print(\"let's see the output_list[]\")\n print(\"this is the list I can access now\", output_list)\n print()\n for x in output_list:\n print(x)\n print(\"length of outpout list is = \", len(output_list))\n print(output_list[0])\n print(output_list[1])\n #last oen would be default\n ## Icouldf use my search for list of words in a string\n #that I wrote for mom's poetry that does words\n #and phrases to see if they exist within each poem\n #mom wantedto know if she used a phrase before\n #but I added that feature for a list of words\n\n\n\n\ndef candy(): #this worked it turned the string into a list\n print('candy called')\n #print(f'list of words={js_switch_string.split()}')\n cities = 
js_switch_string\n cool =cities.split(' ')\n print(cool)\n print()\n\n#candy()\n\n#felix()\n#count_cases(\"js_switch_string\")\n\n#generate list for multiple cases to get something actually done today\n'''elif case in ['google', 'fishfood', 'probability']: #<<==== several cases in list on one line\n print (\"coding\")\n break\n'''\n\n #in switch case instead of len(something) you can do something.length just for the fuck of it\n#def convert_C_multiple_cases_into_list():\n # for each case line\n #I can make my foreach cooler too\n\n# wednesday, April 29, 2020\n# this will have to be generated during the parse to create this\n# using append\ncases_together=[\"google\", \"fishfood\",\"probability\"] #this is python mode\n#I should do this for the parser than to\n#go through grabbing cases that are together\n#and buid the list that way.\n# I was just going backwards\n\n\n#this builds the list in python \"in [\"words\", \"words\", \"words\"]\n#got this working april 29th, 2020\n#need to check (count) number of cases grouped together\n\n#test if cases on this line true or false\n#test if cases ALSO on this line if true multiple\n#test if cases ALSo on this third line mulitpe\n#test if cases also on this fourth line multiple\n#test if cases also on this line\n#test test for when there isn't a case on a line after first line with case\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef multiple_cases_scenario(cases_together):\n print(\"?????multiple cases scearnio to build list\")\n # this will actually be used in parsing to create python\n # version of the multiple cases that works in python\n # when I do the parse I will buid this list and then reuse\n # that's much simpler\n\n # get length of list\n casestogetherlength = len(cases_together)\n # loop thru list\n # append cases names\n\n makelist = \" in [\"\n count = 0;\n quote = \"'\"\n cool = \"\" #starts with nothing in it\n cool + quote;\n for item in cases_together:\n sweet = item #one word from list\n #print(sweet)\n\n #count += 1;\n if count != len(cases_together):\n cool += sweet + quote #+ \",\";\n if count != len(cases_together): #this is new now\n cool += \",\";\n\n count += 1;\n print('cool sees ',cool)\n if count != len(cases_together):\n cool + \",\";\n print(\"now cools sees\", cool)\n\n #if count == len(cases_together):\n if count != len(cases_together):\n cool += quote;\n\n if count < len(cases_together):\n cool + \",\";\n else:\n break;\n #builds list line ['word','word','word']\n #for coolneess you can use count++ which just looks better and more natural\n print(\"cool =\",cool)\n print(\"the count is\", count)\n #print(\"cool =\", cool)\n print(\"wild cool \",cool)\n cool = cool[:-1] #this removes the last char of string which fixes a bug I had\n tail = cool + \"]\" #line above worked great so nonobvious\n print(\"tail =\",tail)\n build_it = makelist + quote + tail\n \n\n print(\"BIGGIE test here \")\n print(build_it) #to see what it sees and if it actually works (so doublful but laughable)\n print()\n print()\n\n# creates a list \"in [\"casename\",\"casename\", etc ]\n# do a loop to count cases\n#multiple_cases_scenario(cases_together); #this calls it\n\n\n\n\n\n\n\n\n\n\n\n\n #============================\ndef replace_a_string(a,b): #name of function bug spelling counts\n print(\"...............testing replacing a string so work on macros expansion\")\n print();\n print('test REPLACE A STRING replace default with else:')\n mystring = \"FOR_ALL we know this will work\"\n # Prints the string by replacing geeks by Geeks\n 
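# hedged aside for the \"skip the first occurrence\" idea in macro1 below:\n    # replace only in the text after the first hit, then stitch the halves\n    first_hit = mystring.find(a)\n    if first_hit != -1:\n        cut = first_hit + len(a)\n        skip_first = mystring[:cut] + mystring[cut:].replace(a, b)\n        print(\" skip-first variant =\", skip_first)\n    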
print(\"original =\",mystring)\n weasel = mystring.replace(a,b)\n print(\" weasel =\", weasel)\n print();\n#so the first word needs to be in there the string\n#replace_a_string(\"FOR_ALL\",\"for(i = 0; i < ARRAY_SIZE; i++)\")\n\n\n\n\n\n\n\n\nmacro1 = ''' #because this must remain untouched,\n I could have it skip first occurrence\n\n #define FOR_ALL for(i = 0; i < ARRAY_SIZE; i++) '''\n\nnow = '''\n #and use it like this:\n #/* Clear the array :this will be the first phrase to test*/\n\n FOR_ALL {\n data[i] = 0;\n }\n '''\n #when the code executes it will ignore the lines with #\n #the issue is that I need the executing code\n#concat macro list to codelist and run it as preprocessor\n#define macro expansion\n#define macro expansion\n\n#just realized the macros will only work in triple strings\n\ntestmacro = '''\n #define FOR_ALL for(i = 0; i < ARRAY_SIZE; i++) {\n #define CHEESE print('have some cheese');\n\n def fakecode():\n print('something') \n FOR_ALL\n do this loop\n if amount < 100:\n CHEESE\n\n '''\n\n #steps\n # '''I could split off the defines at TOP so two strings and only using below it\n #run the replace on existing bottom half string so it doesn't see the defines at top\n #for the replace it uses the top defines string - brilliant\n #\n #loop thru string replacing the lines on bottom\n #then connect the fist string to the resulting string\n #'''\n #may 4th DESIGNING MACRO SWAPPING BEHAVIOR\n'''\ndef macro_expand():\n count how many define macros\n put line into string\n check if #define startswith()\n if true search switch case for word and return codephrase string\n else\n break;\n go thru string of code with loop\n search each line for macrophrase\n if found replace\n '''\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\n\n\n\nword_list =[\"case\", \"break\", \"switch\", \"default\", \"const\", \"#\"]\ndef cottoncandy():\n print(\"cotton candy called to see if several words in string\")\n #just loop thru a list\n counter = 0\n print(js_switch_string)\n #start loop\n for word in word_list: #attempting a macro now maybe later\n counter += 1\n print(word) #this prints out the word in list on each loop\n #now I have to modify this to search for all words on each line\n if word in js_switch_string:\n print(\"yep\", word)\n else:\n print(\"nope\")\n #end loop\n\ncottoncandy();\n\ndef rockyroad():\n print(\"testing rockyroad string manipulations\")\n stringtime = \"testing this out\"\n stringtwotime = \"not testing this out\"\n weird = stringtime + \"*\" + stringtwotime;\n print(\"weird=\",weird)\n\nrockyroad();\n\n\n\nif __name__ == \"__main__\":\n main()\n'''\n\n#paul graham the 100 year language - reffering to a programming language in the future\n\n#April 2003\n\n\n\n\n","repo_name":"blakesouthwood/Santa_Cruz_Python_Preprocessor","sub_path":"nested_switches_gold.py","file_name":"nested_switches_gold.py","file_ext":"py","file_size_in_byte":56540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"75013108423","text":"import pretty_midi\nfrom pretty_midi import PrettyMIDI\nfrom scipy.io import wavfile\nfrom core.performance_rnn.sequence import EventSeq, ControlSeq, Control\nfrom torch import Tensor\nimport numpy as np\nimport torch\nimport joblib\nimport copy\nfrom typing import List\n\n\ndef midi_to_wav(pm: PrettyMIDI, filename: str, rate=16000):\n waveform = pm.fluidsynth(fs=rate)\n wavfile.write(filename, rate, waveform)\n\n\ndef pm_to_wav(pm: PrettyMIDI, filename: str, rate=16000):\n waveform = pm.fluidsynth(fs=rate)\n 
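# note: pm_to_wav duplicates midi_to_wav above line for line; a hedged cleanup\n    # would be to delegate instead: return midi_to_wav(pm, filename, rate)\n    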
wavfile.write(filename, rate, waveform)\n\n\ndef event_seq_to_wav(event_seq: EventSeq, filename: str, instrument='Violin', rate=16000):\n pm = event_seq_to_pm(event_seq, instrument)\n midi_to_wav(pm, filename, rate=rate)\n\n\ndef event_seq_to_pm(event_seq: EventSeq, instrument: str) -> PrettyMIDI:\n program = pretty_midi.instrument_name_to_program(instrument)\n pm = event_seq.to_note_seq().to_midi(program=program)\n return pm\n\n\ndef event_seq_to_midi(event_seq: EventSeq, filename: str, instrument='Violin'):\n pm = event_seq_to_pm(event_seq, instrument)\n pm.write(filename)\n\n\ndef tensor_to_pm(tensor: Tensor, instrument='Violin') -> PrettyMIDI:\n event_seq = tensor_to_event_seq(tensor)\n pm = event_seq_to_pm(event_seq, instrument)\n return pm\n\n\ndef tensor_to_event_seq(tensor: Tensor) -> EventSeq:\n event_seq = EventSeq.from_array(tensor.cpu().numpy())\n return event_seq\n\n\ndef tensor_to_waveform(tensor: Tensor, instrument='Acoustic Grand Piano', fs=22050) -> np.ndarray:\n pm = tensor_to_pm(tensor, instrument=instrument)\n waveform = pm.fluidsynth(fs=fs)\n return waveform\n\n\ndef ndarray_to_pm(array: np.ndarray, instrument='Acoustic Grand Piano') -> PrettyMIDI:\n event_seq = EventSeq.from_array(array)\n program = pretty_midi.instrument_name_to_program(instrument)\n pm = event_seq.to_note_seq().to_midi(program=program)\n return pm\n\n\ndef ndarray_to_waveform(array: np.ndarray, instrument='Acoustic Grand Piano', fs=22050) -> np.ndarray:\n pm = ndarray_to_pm(array, instrument=instrument)\n waveform = pm.fluidsynth(fs=fs)\n return waveform\n\n\ndef batch_tensor_to_batch_waveform(\n tensor: Tensor,\n instrument='Acoustic Grand Piano',\n fs=22050,\n n_jobs=None,\n length=22050 * 6,\n) -> np.ndarray:\n # RuntimeError: Can't call numpy() on Variable that requires grad. Use var.detach().numpy() instead.\n batch_array = tensor.detach().cpu().numpy()\n\n def f(array):\n # print(array.shape)\n waveform = np.zeros([length], dtype=np.float32)\n res = ndarray_to_waveform(array, instrument=instrument, fs=fs)\n\n res_length = min(len(res), length)\n waveform[:res_length] = res[:res_length]\n\n return waveform\n\n # waveforms = joblib.Parallel(n_jobs=n_jobs)(joblib.delayed(f)(array) for array in batch_array)\n waveforms = [f(array) for array in batch_array] \n\n batch_waveform = np.stack(waveforms)\n\n return batch_waveform\n\n\ndef tensor_to_wav(tensor: Tensor, filename: str, instrument='Acoustic Grand Piano', fs=22050):\n pm = tensor_to_pm(tensor, instrument=instrument)\n pm_to_wav(pm, filename, rate=fs)\n\n\ndef pitch_histogram_string_to_control(control: str) -> Control:\n # pitch_histogram, note_density = control.split(';')\n pitch_histogram = control\n note_density = 4 \n\n pitch_histogram = list(filter(len, pitch_histogram.split(',')))\n if len(pitch_histogram) == 0:\n pitch_histogram = np.ones(12) / 12\n else:\n pitch_histogram = np.array(list(map(float, pitch_histogram)))\n assert pitch_histogram.size == 12\n assert np.all(pitch_histogram >= 0)\n pitch_histogram = pitch_histogram / pitch_histogram.sum() \\\n if pitch_histogram.sum() else np.ones(12) / 12\n note_density = int(note_density)\n assert note_density in range(len(ControlSeq.note_density_bins))\n control = Control(pitch_histogram, note_density)\n return control\n\n\ndef pitch_histogram_string_to_control_tensor(control_str: str) -> Tensor:\n \"\"\"\n\n :param control_str: e.g. 
'2,0,1,1,0,1,0,1,1,0,0,1'\n :return: torch.Size([12])\n \"\"\"\n control = pitch_histogram_string_to_control(control_str)\n controls = torch.from_numpy(control.to_pitch_histogram_array())\n return controls\n\n\ndef cut_pm(pm: PrettyMIDI, duration: float) -> PrettyMIDI:\n new_pm = copy.deepcopy(pm)\n new_pm.adjust_times([0, duration], [0, duration])\n return new_pm\n\n\ndef concat_pms(pms: List[PrettyMIDI]) -> PrettyMIDI:\n res_pm = copy.deepcopy(pms[0])\n print('before: ', len(res_pm.instruments[0].notes))\n for i, pm in enumerate(pms[1:]):\n cur_pm = copy.deepcopy(pm)\n cur_last_time = cur_pm.get_end_time()\n res_last_time = res_pm.get_end_time()\n cur_pm.adjust_times([0., cur_last_time], [res_last_time, res_last_time + cur_last_time])\n res_pm.instruments[0].notes.extend(cur_pm.instruments[0].notes)\n print('after: ', len(res_pm.instruments[0].notes))\n\n return res_pm\n","repo_name":"chuangg/Foley-Music","sub_path":"core/utils/midi.py","file_name":"midi.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"81"} +{"seq_id":"39089516512","text":"\"\"\"Script to populate datastore with regression test data.\"\"\"\n\n\n# This assumes the command is being run via tox hence the\n# repository root is the current directory.\nfrom regression import regression_utils\nfrom six.moves import input\n\n\nFETCH_MAX = 20\nALL_KINDS = [\n 'Character',\n 'Company',\n 'Kind',\n 'Person',\n 'Post',\n]\nTRANSACTION_MAX_GROUPS = 5\n\n\ndef fetch_keys(dataset, kind, fetch_max=FETCH_MAX, query=None):\n if query is None:\n query = dataset.query(kind=kind).limit(\n fetch_max).projection(['__key__'])\n # Make new query with start cursor if a previously set cursor\n # exists.\n if query._cursor is not None:\n query = query.with_cursor(query.cursor())\n\n return query, query.fetch()\n\n\ndef get_ancestors(entities):\n # NOTE: A key will always have at least one path element.\n key_roots = [entity.key().path()[0] for entity in entities]\n # Turn into hashable type so we can use set to get unique roots.\n # Also sorted the items() to ensure uniqueness.\n key_roots = [tuple(sorted(root.items())) for root in key_roots]\n # Cast back to dictionary.\n return [dict(root) for root in set(key_roots)]\n\n\ndef delete_entities(dataset, entities):\n dataset_id = dataset.id()\n connection = dataset.connection()\n\n key_pbs = [entity.key().to_protobuf() for entity in entities]\n connection.delete_entities(dataset_id, key_pbs)\n\n\ndef remove_kind(dataset, kind):\n delete_outside_transaction = False\n with dataset.transaction():\n results = []\n\n query, curr_results = fetch_keys(dataset, kind)\n results.extend(curr_results)\n while curr_results:\n query, curr_results = fetch_keys(dataset, kind, query=query)\n results.extend(curr_results)\n\n if not results:\n return\n\n # Now that we have all results, we seek to delete.\n print('Deleting keys:')\n print(results)\n\n ancestors = get_ancestors(results)\n if len(ancestors) > TRANSACTION_MAX_GROUPS:\n delete_outside_transaction = True\n else:\n delete_entities(dataset, results)\n\n if delete_outside_transaction:\n delete_entities(dataset, results)\n\n\ndef remove_all_entities():\n print('This command will remove all entities for the following kinds:')\n print('\\n'.join(['- ' + val for val in ALL_KINDS]))\n response = input('Is this OK [y/n]? 
')\n if response.lower() != 'y':\n print('Doing nothing.')\n return\n\n dataset = regression_utils.get_dataset()\n for kind in ALL_KINDS:\n remove_kind(dataset, kind)\n\n\nif __name__ == '__main__':\n remove_all_entities()\n","repo_name":"dhermes/test-gcloud-on-gae","sub_path":"application/vendor/regression/clear_datastore.py","file_name":"clear_datastore.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"26934256641","text":"import numpy as np\nimport cv2 as cv\n\n# 设置putText函数字体\nfont = cv.FONT_HERSHEY_SIMPLEX\n\n\n# 计算两边夹角额cos值\ndef angle_cos(p0, p1, p2):\n d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')\n return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))\n\n\ndef find_squares(img):\n squares = []\n img = cv.GaussianBlur(img, (3, 3), 0)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n bin = cv.Canny(gray, 100, 150, apertureSize=3)\n\n cv.imwrite(\"developing_images\\\\FFC01EC6-E6F4-4FFF-BBD8-DFC4D0E0A6E1-16.9-canny.jpg\", bin)\n\n contours, _hierarchy = cv.findContours(bin, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n print(\"轮廓数量:%d\" % len(contours))\n index = 0\n # 轮廓遍历\n for cnt in contours:\n cnt_len = cv.arcLength(cnt, True) # 计算轮廓周长\n cnt = cv.approxPolyDP(cnt, 0.02 * cnt_len, True) # 多边形逼近\n # 条件判断逼近边的数量是否为4,轮廓面积是否大于1000,检测轮廓是否为凸的\n if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt):\n M = cv.moments(cnt) # 计算轮廓的矩\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00']) # 轮廓重心\n\n cnt = cnt.reshape(-1, 2)\n max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])\n if max_cos < 0.1:\n # 检测四边形(不限定角度范围)\n # if True:\n # 只检测矩形(cos90° = 0)\n # if max_cos < 0.1:\n index = index + 1\n cv.putText(img, (\"#%d\" % index), (cx, cy), font, 0.7, (255, 0, 255), 2)\n squares.append(cnt)\n return squares, img\n\n\ndef main():\n img = cv.imread(\"developing_images\\\\FFC01EC6-E6F4-4FFF-BBD8-DFC4D0E0A6E1-16.9.jpg\")\n squares, img = find_squares(img)\n cv.drawContours(img, squares, -1, (0, 0, 255), 2)\n # cv.imshow('squares', img)\n cv.imwrite(\"developing_images\\\\FFC01EC6-E6F4-4FFF-BBD8-DFC4D0E0A6E1-16.9-rec.jpg\", img)\n ch = cv.waitKey()\n\n print('Done')\n\n\nif __name__ == '__main__':\n print(__doc__)\n main()\n cv.destroyAllWindows()","repo_name":"VincentLi1216/TomatoSoup","sub_path":"ImageProcessing/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"2342298163","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : entry.py\n# @Author: Betafringe\n# @Date : 2020/1/5\n# @Desc : \n# @Contact : betafringe@foxmail.com\n\nimport sys, os, json\nfrom flask import Flask, render_template, request, Response\nfrom flask import jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom services import ret_r\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"4037e9565e69cc2668f0d95fe1899621\"\napp.config['JSON_AS_ASCII'] = False\n# app.config['SQLCHEMY_DATABASE_URI'] = '_sqlite:///site.db'\n# # db = SQLAlchemy(app)\n\n@app.route('/index.html')\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n# task/v1/analyse/?carType=name\n@app.route('/task/v1/analyse/', methods=['GET'])\ndef task_analyse():\n ret_data = {}\n if request.method == 'GET':\n car_name = 
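request.args.get(\"carType\")\n        # hedged guard: request.args.get returns None when ?carType= is missing;\n        # bail out early instead of passing None into ret_r (400 is an assumed choice)\n        if car_name is None:\n            return jsonify({\"error\": \"carType is required\"}), 400\n        # 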
request.args.get(\"carType\")\n ret_data = ret_r(car_name)\n else:\n pass\n return jsonify(ret_data)\n\n\n@app.route('/charts/charts.html')\ndef charts():\n return render_template('charts/charts.html')\n\n\ndef Response_headers(content):\n resp = Response(content)\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\n\n@app.route('/tables/datatables.html')\ndef datatables():\n return render_template('tables/datatables.html')\n\n\n@app.errorhandler(403)\ndef page_not_found(error):\n content = json.dumps({\"error_code\": \"403\"})\n resp = Response_headers(content)\n return resp\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n content = json.dumps({\"error_code\": \"404\"})\n resp = Response_headers(content)\n return resp\n\n\n@app.errorhandler(400)\ndef page_not_found(error):\n content = json.dumps({\"error_code\": \"400\"})\n resp = Response_headers(content)\n return resp\n\n\n@app.errorhandler(410)\ndef page_not_found(error):\n content = json.dumps({\"error_code\": \"410\"})\n resp = Response_headers(content)\n return resp\n\n\n@app.errorhandler(500)\ndef page_not_found(error):\n content = json.dumps({\"error_code\": \"500\"})\n resp = Response_headers(content)\n return resp\n\n\nif __name__ == '__main__':\n app.run(\n host='127.0.0.1',\n port=5008,\n debug=True\n )\n","repo_name":"Betafringe/master_app","sub_path":"src/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30173067663","text":"# ResNet for CIFAR (32x32)\n# 2019.07.24-Changed output of forward function\n# Huawei Technologies Co., Ltd. \n# taken from https://github.com/huawei-noah/Data-Efficient-Model-Compression/blob/master/DAFL/resnet.py\n# for comparison with DAFL\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass BasicBlock(nn.Module):\n expansion = 1\n \n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n \n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n \n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n \n \nclass Bottleneck(nn.Module):\n expansion = 4\n \n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n \n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n \n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = 
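F.relu(self.bn2(self.conv2(out)))  # the 3x3 stage\n        # descriptive note: the 1x1 convs around this 3x3 first shrink and then\n        # re-expand the channels (by expansion=4), which keeps the 3x3 cheap\n        # 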
F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n \n \nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n \n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512*block.expansion, num_classes)\n \n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n \n def forward(self, x, return_features=False):\n x = self.conv1(x)\n x = self.bn1(x)\n out = F.relu(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.adaptive_avg_pool2d(out, (1,1))\n feature = out.view(out.size(0), -1)\n out = self.linear(feature)\n\n if return_features:\n return out, feature\n else:\n return out\n \ndef resnet18(num_classes=10):\n return ResNet(BasicBlock, [2,2,2,2], num_classes)\n \ndef resnet34(num_classes=10):\n return ResNet(BasicBlock, [3,4,6,3], num_classes)\n \ndef resnet50(num_classes=10):\n return ResNet(Bottleneck, [3,4,6,3], num_classes)\n \ndef resnet101(num_classes=10):\n return ResNet(Bottleneck, [3,4,23,3], num_classes)\n \ndef resnet152(num_classes=10):\n return ResNet(Bottleneck, [3,8,36,3], num_classes)","repo_name":"VainF/Torch-Pruning","sub_path":"benchmarks/engine/models/cifar/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":1853,"dataset":"github-code","pt":"81"} +{"seq_id":"12041418735","text":"import time\r\nimport os\r\nimport torch\r\nimport wjc_core\r\nimport argparse\r\nimport get_feturse\r\nfrom tensorboardX import SummaryWriter\r\nfrom attention_unet import AttU_Net\r\nfrom segnet import SegNet\r\nfrom unet import Unet\r\nfrom Unet_plus_plus import Unet_plus_plus\r\nfrom TSDUC_Net import TSDCU_net\r\nfrom Deeplab_v3_plus import DeepLabv3_plus\r\nfrom DAUnet import DAUnet\r\n\r\nif __name__ == '__main__':\r\n\r\n print(\"train_GLCM\")\r\n # train_GLCM\r\n train_inPath = './data/train/image/original'\r\n train_outPath = './data/train/image/glcm'\r\n train_image = len(os.listdir(train_inPath))\r\n for i in range(train_image):\r\n traininFile=os.path.join(train_inPath, os.listdir(train_inPath)[i])\r\n trainoutFile = os.path.join(train_outPath, os.listdir(train_inPath)[i])\r\n get_feturse.GLCM_Features(traininFile, trainoutFile)\r\n print(\"val_GLCM\")\r\n # val_GLCM\r\n val_inPath = './data/val/image/original'\r\n val_outPath = './data/val/image/glcm'\r\n val_image = len(os.listdir(val_inPath))\r\n for i in range(val_image):\r\n valinFile = os.path.join(val_inPath, os.listdir(val_inPath)[i])\r\n valoutFile = os.path.join(val_outPath, os.listdir(val_inPath)[i])\r\n get_feturse.GLCM_Features(valinFile, valoutFile)\r\n print(\"test_GLCM\")\r\n # test_GLCM\r\n test_inPath = './data/test/image/original'\r\n test_outPath = './data/test/image/glcm'\r\n test_image = len(os.listdir(test_inPath))\r\n for i in 
range(test_image):\r\n testinFile = os.path.join(test_inPath, os.listdir(test_inPath)[i])\r\n testoutFile = os.path.join(test_outPath, os.listdir(test_inPath)[i])\r\n get_feturse.GLCM_Features(testinFile, testoutFile)\r\n\r\n\r\n model, name = TSDCU_net(3, 1), 'data_TSDCU_net_5epoch'\r\n parse = argparse.ArgumentParser()\r\n parse.add_argument(\"--model_name\", type=str, default=name)\r\n parse.add_argument(\"--batch_size\", type=int, default=1)\r\n parse.add_argument(\"--epoch\", type=int, default=5)\r\n parse.add_argument(\"--data_file\", type=str, default=\"data\")\r\n parse.add_argument(\"--ckpt\", type=str, help=\"the path of model weight file\", default=\"./\" + name + \"/weights\")\r\n args = parse.parse_args()\r\n # Prepare a space for saving trained model and predicted results.\r\n wjc_core.init_work_space(args)\r\n # Train a model.\r\n start_time = time.time()\r\n writer = SummaryWriter('./' + args.model_name + '/runs')\r\n wjc_core.train(args, writer, model)\r\n writer.close()\r\n end_time = time.time()\r\n print(\"Training cost \", end_time - start_time, \" seconds\")\r\n # Test a model.\r\n start_time = time.time()\r\n # test the model trained\r\n wjc_core.test(args)\r\n # or test a certain model\r\n # wjc_core.test(args, save_gray=True, manual=True, weight_path='./weights/data_TSDCU_net_150epoch.pth')\r\n end_time = time.time()\r\n print(\"Testing cost \", end_time - start_time, \" seconds\")\r\n # Print the validation accuracy of the MODAU-net model. *You can change the pth file.\r\n print(wjc_core.validation(args, torch.load('./data_TSDCU_net_5epoch/weights/data_TSDCU_net_5epoch.pth', map_location='cuda')))\r\n\r\n\r\n\r\n # Print parameter number of each model.\r\n wjc_core.model_print(model)\r\n","repo_name":"fjc1575/Sea-Ice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18069708565","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport web\nimport markdown\n\ndef make_html(c):\n return markdown.markdown(c)\n\ndef trim_utf8(text, length):\n '''utf8字符截取'''\n extra_flag = '...' if length < len(text.decode('utf-8')) else ''\n return text.decode('utf-8')[0:length].encode('utf-8') + extra_flag\n\ndef comments_to_lis(comments):\n '''评论列表'''\n lis = []\n for c in comments:\n li_start = '
<li id=\"comment-%s\">' % c['id']\n        user_face= '''<a href=\"/user/%s\">\n            <img src=\"%s\" alt=\"%s\"/>\n        </a>''' % (c['user_id'], c['user_face'], c['username'])\n        content_start = '<div class=\"comment-content\">'\n        content_head = '''<div class=\"content-head\">\n            <a href=\"/user/%s\">%s</a>\n            <span class=\"time\">%s</span>\n            <a class=\"reply\" href=\"javascript:;\">回应</a>\n            <a class=\"top\" href=\"#top\">top</a>\n        </div>''' % (c['user_id'], c['username'], c['time'])\n        content_quote = ''\n        if c['quote_content']:\n            content_quote = '<div class=\"quote\">引用 <a href=\"/user/%s\">%s</a><p>%s</p></div>' \\\n                % (c['quote_user_id'], c['quote_username'], c['quote_content'])\n        content_body = '<p>%s</p>' % c['content']\n        content_end = '</div>'\n        li_end = '</li>'\n\n        lis.append({'li': li_start + user_face + content_start + content_head +\n                    content_quote + content_body + content_end + li_end})\n\n    return lis\n\ndef menu(user):\n    '''导航菜单'''\n    cur_user_id = user.current_id()\n    if cur_user_id:\n        status = user.status(cur_user_id)\n        return [{'link': '/user/%d' % cur_user_id, 'name': status['username']},\n                {'link': '/account/posts', 'name': '文章'},\n                {'link': '/account/settings', 'name': '设置'},\n                {'link': '/logout', 'name': '退出'}]\n    else:\n        return [{'link': '/login', 'name': '登录'},\n                {'link': '/register', 'name': '注册'}]\n","repo_name":"RussellLuo/forum","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"81"}
{"seq_id":"33666490044","text":"import pygame\nimport colores\nimport random\n\nclass Dona:\n    def __init__(self, x, y, ancho, alto):\n        self.surface = pygame.image.load(\"00.png\")\n        self.surface = pygame.transform.scale(self.surface, (ancho, alto))\n        self.rect = self.surface.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n        self.visible = True\n        self.speed = random.randrange(10, 20, 1)\n\n    def update(self):\n        self.rect.y += self.speed\n\n    def actualizar_pantalla(self, ventana_ppal, personaje):\n        if personaje.rect_boca.colliderect(self.rect):\n            personaje.score += 1\n            self.reiniciar()\n\n        if self.rect.y > 800:\n            self.reiniciar()\n\n        ventana_ppal.blit(self.surface, self.rect)\n\n    def reiniciar(self):\n        self.rect.x = random.randrange(0, 740, 60)\n        self.rect.y = random.randrange(-1000, 0, 60)\n\ndef crear_lista_donas(cantidad):\n    lista_donas = []\n    for _ in range(cantidad):\n        y = random.randrange(-1000, 0, 60)\n        x = random.randrange(0, 740, 60)\n        dona = Dona(x, y, 60, 60)\n        lista_donas.append(dona)\n    return lista_donas","repo_name":"Santiago1732/pygamepoo_v1","sub_path":"dona.py","file_name":"dona.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17716632791","text":"import os,re,io\r\nfrom google.cloud import vision\r\nfrom google.cloud.vision import types\r\nimport pandas as pd\r\nos.environ['GOOGLE_APPLICATION_CREDENTIALS']=\"D:\\Pycharm\\ServiceAccount_VisionAPI.json\"\r\nclient=vision.ImageAnnotatorClient()\r\nfile_path='D:\\Pycharm\\Building_house.jpg'\r\naspect_ratios=[4/3]\r\ndef cropHint(file_path,aspect_ratios):\r\n    with io.open(file_path,'rb') as img_file:\r\n        content=img_file.read()\r\n    image=vision.types.Image(content=content)\r\n    crop_hint_params=vision.types.CropHintsParams(aspect_ratios=aspect_ratios)\r\n    image_context=vision.types.ImageContext(\r\n        crop_hints_params=crop_hint_params\r\n    )\r\n    response=client.crop_hints(\r\n        image=image,\r\n        image_context=image_context\r\n    )\r\n    cropHints=response.crop_hints_annotation.crop_hints\r\n    for crophint in cropHints:\r\n        print('Confidence : ',crophint.confidence)\r\n        print('Importance Fraction :',crophint.importance_fraction)\r\n        print('Vertices',crophint.bounding_poly)\r\ncropHint(file_path,aspect_ratios)","repo_name":"mohitaroragit/CodeRepo","sub_path":"gcs_vision_detect_Crop_Hint.py","file_name":"gcs_vision_detect_Crop_Hint.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"19357860881","text":"# 21.02.23 [Street Trees]\n'''\nPlant new trees at intervals equal to the GCD of the gaps between the existing trees.\n'''\n\nimport sys\n\ndef gcd(a,b):\n    if a < b :\n        a,b = b,a\n    while a%b:\n        a,b = b,a%b\n    return b\n\nn = 
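int(sys.stdin.readline())  # the number of existing street trees\n# hedged worked example: gaps of 2, 4 and 6 give g = gcd(gcd(2, 4), 6) = 2, and\n# a gap of size d fits d//g - 1 new trees, so 0 + 1 + 2 = 3 trees in total\n# 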
\n\nt = []\nfor i in range(n):\n t.append(int(sys.stdin.readline()))\nt.sort()\n\ng = t[1]-t[0]\nfor i in range(2, n):\n g = gcd(g, t[i]-t[i-1])\n\nres = 0\nfor i in range(1,n):\n # the number of g-sized intervals in a gap, minus 1, is the number of trees to plant\n res += (t[i]-t[i-1])//g -1\nprint(res)\n","repo_name":"sladuf/Algorithm","sub_path":"BaekJoon/BJ2485.py","file_name":"BJ2485.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
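A quick sanity check of the interval-GCD idea in the record above (illustrative: the tree positions are made up, and math.gcd plus functools.reduce stand in for the hand-rolled gcd):

from functools import reduce
from math import gcd

trees = [1, 3, 7, 13]
gaps = [b - a for a, b in zip(trees, trees[1:])]  # [2, 4, 6]
g = reduce(gcd, gaps)                             # 2
extra = sum(gap // g - 1 for gap in gaps)         # 0 + 1 + 2
print(extra)  # 3 -> new trees go at positions 5, 9 and 11

{"seq_id":"41830645428","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExample adapted from Josh Sanders' original version on Sanworks Bpod repository\n\"\"\"\nfrom pybpodapi.bpod import Bpod\n\"\"\"\nRun this protocol now\n\"\"\"\nbpod = Bpod()\n\nfor m in bpod.modules:\n\tprint(m)\n\nbpod.close()\n\n","repo_name":"pybpod/pybpod-api","sub_path":"examples/function_examples/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"6913097659","text":"import os\r\n\r\nnumbers = [1, 2, 3]\r\nnew_numbers = [item + 1 for item in numbers]\r\n\r\nprint(new_numbers)\r\n# %%\r\nname = \"Daniel\"\r\nletters_list = [letter for letter in name]\r\n\r\nprint(letters_list)\r\n# %%\r\nnumbers = [number * 2 for number in range(1, 5)]\r\n\r\nprint(numbers)\r\n# %%\r\nnames = [\"Jose\", \"Juan\", \"Pedro\", \"Maria\", \"Andres\"]\r\n\r\nshort_names = [name for name in names if len(name) <= 4]\r\n\r\nprint(short_names)\r\n# %%\r\nnames = [\"Jose\", \"Juan\", \"Pedro\", \"Maria\", \"Andres\"]\r\n\r\nupper = [name.upper() for name in names if len(name) > 4]\r\n\r\nprint(upper)\r\n# %%\r\nnumbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\r\n\r\nsquared_numbers = [number**2 for number in numbers]\r\n\r\nprint(squared_numbers)\r\n# %%\r\nnumbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\r\n\r\nresult = [number for number in numbers if number % 2 == 0]\r\n\r\nprint(result)\r\n# %%\r\nimport pandas as pd\r\n\r\nraw1 = pd.read_csv(\"file1.txt\", header=None)\r\nfile1 = raw1[0].tolist()\r\n\r\nraw2 = pd.read_csv(\"file2.txt\", header=None)\r\nfile2 = raw2[0].tolist()\r\n\r\nfile1\r\n\r\nresult = [file1[number] for number in range(len(file1)) if file1[number] in file2]\r\n# result = [number for number in file1 if number in file2] # Same thing, but easier.\r\n\r\nprint(result)\r\n\r\n# or\r\n\r\nwith open(\"file1.txt\") as file1:\r\n data3 = file1.readlines()\r\n\r\nwith open(\"file2.txt\") as file2:\r\n data4 = file2.readlines()\r\n\r\nresult = [int(number) for number in data3 if number in data4]\r\n\r\nprint(result)\r\n# %%\r\nimport random\r\n\r\nnames = [\"Jose\", \"Juan\", \"Pedro\", \"Maria\", \"Andres\"]\r\n\r\n# {new_key: new_value for item in list}\r\nstudent_scores = {student: random.randint(1, 100) for student in names}\r\nstudent_scores\r\n\r\n# {new_key: new_value for (key, value) in dictionary.items() if test}\r\npassed_students = {student: score for (student, score) in student_scores.items() if score >= 60}\r\npassed_students\r\n# %%\r\nsentence = \"What is the Airspeed Velocity of an Unladen Swallow?\"\r\n\r\nresult = {word: len(word) for word in sentence.split()}\r\n\r\nprint(result)\r\n# %%\r\n# Celsius to Fahrenheit -> (temp_c * 9/5) + 32 = temp_f\r\n\r\nweather_c = {\r\n \"Monday\": 12,\r\n \"Tuesday\": 14,\r\n \"Wednesday\": 15,\r\n \"Thursday\": 14,\r\n \"Friday\": 21,\r\n \"Saturday\": 22,\r\n \"Sunday\": 24,\r\n}\r\n\r\nweather_f = {day: (temp_c * 9/5) + 32 for (day, temp_c) in weather_c.items()}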
\r\n\r\nprint(weather_f)\r\n# %%\r\nimport pandas as pd\r\n\r\nstudent_dict = {\"student\": [\"Angela\", \"James\", \"Lily\"],\r\n \"score\": [56, 76, 98]}\r\n\r\nstudent_df = pd.DataFrame(student_dict)\r\n\r\nstudent_df\r\n\r\n# Loop through a Pandas data frame:\r\nfor (index, row) in student_df.iterrows():\r\n print(row)\r\n print()\r\n print(row.student)\r\n print()\r\n print(row.score)\r\n print()\r\n print()\r\n# %%\r\n","repo_name":"DanielMoscoso/100day_challenge","sub_path":"Day_26/warm_up.py","file_name":"warm_up.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"70224711305","text":"# Heart\r\n\r\n# import turtle package\r\nimport turtle\r\n\r\npen = turtle.Turtle()\r\n\r\ndef curve():\r\n for i in range (200):\r\n pen.right(1)\r\n pen.forward(1)\r\ndef heart():\r\n pen.fillcolor('red')\r\n pen.begin_fill()\r\n pen.left(140)\r\n pen.forward(113)\r\n curve()\r\n pen.left(120)\r\n curve()\r\n pen.forward(112)\r\n pen.end_fill()\r\n\r\ndef txt():\r\n pen.up()\r\n pen.setpos(-68, 99)\r\n pen.down()\r\n pen.color('lightgreen')\r\n pen.write(\"lol\", font=(\"Verdana\", 12, \"bold\"))\r\n \r\nheart()\r\ntxt()\r\npen.ht()\r\n\r\n'''\r\n#note - an ERROR may be caused as Python is complaining about breaking out of the while loop when the window is closed - you can ignore that for now (or a fix is provided in the solutions section for those who are interested\"\"\"\r\n\"\"\"\r\n==========Task===============\r\nChange the starting values in the list and experiment with them\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nimport random\r\nimport time\r\n\r\nclass Ball: \r\n def __init__(self,canvas,color): \r\n self.canvas=canvas \r\n self.id=canvas.create_oval(30,30,50,50,fill=color) \r\n self.canvas.move(self.id,100,200)\r\n\r\n #ADD THESE LINES TO OUR __INIT__ METHOD\r\n starting_position=[-3,-2,-1,1,2,3] #create a list with various possible starting positions\r\n random.shuffle(starting_position) #mix them up with the random shuffle function\r\n self.x = starting_position[0] #set the value of x to the first item in the list - this means x can be any value in the list from -3 to 3\r\n self.y = -3 #changing y to -3 will simply increase the rate at which it moves along y (increases the speed)\r\n self.canvas_height=self.canvas.winfo_height() #set the canvas height by calling the canvas function winfo_height (it gives us the current canvas height)\r\n\r\n def draw(self): \r\n self.canvas.move(self.id,self.x,self.y) \r\n pos=self.canvas.coords(self.id) \r\n \r\n\r\n if pos[1] <=0: \r\n self.y=1\r\n if pos[3] >=self.canvas_height: \r\n self.y=-1\r\n \r\n\r\ndef main():\r\n tk=Tk()\r\n tk.title(\"My 21st Century Pong Game\")\r\n tk.resizable(0,0)\r\n tk.wm_attributes(\"-topmost\",1)\r\n canvas=Canvas(tk,bg=\"white\",width=500,height=400,bd=0,highlightthickness=0)\r\n canvas.pack()\r\n tk.update()\r\n\r\n ball1=Ball(canvas,'green')\r\n while 1:\r\n tk.update()\r\n ball1.draw() #call the ball draw method here\r\n time.sleep(0.05)\r\nmain()\r\n'''","repo_name":"AmitRathod07/My_Projects","sub_path":"Py App/Project_Hrt.py","file_name":"Project_Hrt.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
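The commented-out pong snippet above warns that the blocking `while 1: tk.update()` loop errors out when the window is closed. One conventional fix (a sketch, not from the original project) is to let Tk drive the animation with after(), so nothing runs once the window is destroyed:

import tkinter as tk

def animate(root, canvas, ball):
    canvas.move(ball, 2, 1)
    # reschedule instead of blocking in a while loop; Tk simply stops
    # invoking this callback after the window is destroyed
    root.after(50, animate, root, canvas, ball)

root = tk.Tk()
canvas = tk.Canvas(root, width=500, height=400, bg='white')
canvas.pack()
ball = canvas.create_oval(30, 30, 50, 50, fill='green')
root.after(50, animate, root, canvas, ball)
root.mainloop()

{"seq_id":"37990952797","text":"#working with CSV files\nprint(\"--- WORKING WITH CSV FILES ---\")\n\nimport csv\n\ndef display_dictionary_line_by_line (d):\n print (' ')\n for item in d:\n print (item,':', d[item] )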
\n\nwith open('Census_by_Community_2018.csv', newline='') as census_db:\n reader = csv.reader(census_db, delimiter =',',)\n counter = 0\n dict1 = {}\n dict2 = {}\n for row in reader:\n key1 = row[0]\n key2 = row[4]\n if counter != 0:\n if key1 in dict1.keys():\n dict1[key1] = int(dict1[key1]) + int(row[9])\n else:\n dict1[row[0]] = row[9]\n if key2 in dict2.keys():\n dict2[key2] = int(dict2[key2]) + int(row[9])\n else:\n dict2[row[4]] = row[9]\n counter = counter + 1\n display_dictionary_line_by_line (dict1)\n display_dictionary_line_by_line (dict2)\n","repo_name":"giochajon/pythonStuff","sub_path":"filemanipulation.py","file_name":"filemanipulation.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
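The census record above aggregates two columns by key with manual key-existence checks; collections.defaultdict removes that branching. A sketch under the same assumptions as the record (positional columns 0, 4 and 9, header skipped, values parsed as int):

import csv
from collections import defaultdict

def sum_column_by_key(path, key_idx, value_idx):
    # group one numeric column by another column, as the record above
    # does with dict1/dict2, but without the explicit key checks
    totals = defaultdict(int)
    with open(path, newline='') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row (the original skips it via `counter`)
        for row in reader:
            totals[row[key_idx]] += int(row[value_idx])
    return dict(totals)

# the two aggregations above would become:
# sum_column_by_key('Census_by_Community_2018.csv', 0, 9)
# sum_column_by_key('Census_by_Community_2018.csv', 4, 9)

{"seq_id":"12303968026","text":"import datetime\nimport urllib.parse as urlparse\n\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.managers import TaggableManager\nfrom django.db.models import Q\n\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom bookmarks import settings\nfrom django.conf import settings as global_settings\n\nif 'cms' in global_settings.INSTALLED_APPS:\n from cms.models.pluginmodel import CMSPlugin\nimport traceback\nimport sys\nfrom django.template.defaultfilters import slugify\nfrom django.utils import timezone\n\n\nfrom PirateLearner.models import BaseContentClass\n\nfrom django.utils.html import strip_tags\nfrom django.utils.safestring import mark_safe\n\nfrom bookmarks.utils import truncate_words\n\nPRIVACY = (\n ('pub','public'),\n ('priv','private'),\n)\n\n\nclass Bookmark(BaseContentClass):\n\n url = models.URLField(unique=True)\n\n adder = models.ForeignKey(User, related_name=\"added_bookmarks\", verbose_name=_(\"adder\"),on_delete = models.CASCADE)\n added = models.DateTimeField(_(\"added\"), default=datetime.datetime.now)\n\n def __unicode__(self):\n return self.url\n\n class Meta:\n ordering = [\"-added\", ]\n\n\nclass BookmarkFolderInstance(BaseContentClass):\n\n adder = models.ForeignKey(User, related_name=\"bookmarks_folder\", verbose_name=_(\"user\"),on_delete = models.CASCADE)\n created = models.DateTimeField(_(\"created\"), default=datetime.datetime.now)\n\n title = models.CharField(_(\"title\"), max_length=100)\n description = models.TextField(_(\"description\"), blank=True)\n def __unicode__(self):\n return _(\"%(title)s\") % {\"title\":self.title}\n\nclass BookmarkInstance(BaseContentClass):\n\n bookmark = models.ForeignKey(Bookmark, related_name=\"saved_instances\", verbose_name=_(\"bookmark\"),on_delete = models.CASCADE)\n user = models.ForeignKey(User, related_name=\"saved_bookmarks\", verbose_name=_(\"user\"),on_delete = models.CASCADE)\n saved = models.DateTimeField(_(\"saved\"), default=datetime.datetime.now)\n\n title = models.CharField(_(\"title\"), max_length=100)\n description = models.TextField(_(\"description\"), blank=True)\n note = models.TextField(_(\"note\"), blank=True)\n image_url = models.URLField(blank=True, null=True)\n folder = models.ForeignKey(BookmarkFolderInstance, verbose_name=_(\"folder\"),on_delete = models.CASCADE)\n privacy_level = models.CharField(choices=PRIVACY,max_length=4)\n tags = TaggableManager(blank=True)\n is_promoted = models.BooleanField(default=False)\n def save(self, url,force_insert=False, force_update=False):\n try:\n bookmark = Bookmark.objects.get(url=url)\n except Bookmark.DoesNotExist:\n # has_favicon=False 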
is temporary as the view for adding bookmarks will change it\n bookmark = Bookmark(url=url, adder=self.user)\n bookmark.save()\n if self.folder:\n folder_name = self.folder.title\n else:\n print(\"Logs:folder not provided\")\n folder_name = 'Orphan'\n try:\n folder = BookmarkFolderInstance.objects.get(adder = self.user,title=folder_name )\n except BookmarkFolderInstance.DoesNotExist:\n print(\"Logs:Creating Orphan folder for user\")\n folder = BookmarkFolderInstance.create(adder = self.user,title=folder_name,description= self.description )\n folder.save()\n self.folder = folder\n if not self.privacy_level:\n self.privacy_level = 'priv'\n print(\"Logs: Privacy level not provided setting default to private\")\n\n if not self.is_promoted:\n if self.user.is_staff and self.privacy_level == 'pub':\n self.is_promoted = True\n else:\n self.is_promoted = False\n\n self.bookmark = bookmark\n self.saved = timezone.now()\n super(BookmarkInstance, self).save(force_insert, force_update)\n\n def delete(self):\n bookmark = self.bookmark\n super(BookmarkInstance, self).delete()\n if bookmark.saved_instances.all().count() == 0:\n bookmark.delete()\n\n def __unicode__(self):\n return _(\"%(bookmark)s for %(user)s\") % {\"bookmark\":self.bookmark, \"user\":self.user}\n\n def get_absolute_url(self):\n kwargs = {'slug': slugify(self.title)+'/'+str(self.id),}\n return reverse('bookmarks:detail-view', kwargs=kwargs)\n\n def get_external_url(self):\n return self.bookmark.url\n\n def get_url_domain(self):\n parsed_uri = urlparse.urlparse( self.bookmark.url )\n return '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)\n\n def get_image_url(self):\n if len(self.image_url)>0:\n return self.image_url\n else:\n return(settings.BOOKMARK_DEFAULT_IMAGE)\n\n def get_title(self):\n return self.title\n\n def get_note(self):\n return self.note\n\n\n def get_summary(self):\n description = strip_tags(self.description)\n return mark_safe(truncate_words(description,120))\n\n def get_description(self):\n return self.description\n\n def get_parent_title(self):\n return self.folder.title\n\n def get_tags(self):\n tags = self.tags.all()\n tag_list = []\n for tag in tags:\n try:\n tmp = {}\n tmp['name'] = tag.slug\n kwargs = {'tag': tag.slug,}\n tmp['url'] = reverse('bookmarks:tagged-bookmarks',kwargs=kwargs)\n tag_list.append(tmp)\n except:\n print((\"Unexpected error:\", sys.exc_info()[0]))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n fname,lineno,fn,text = frame\n print((\"Error in %s on line %d\" % (fname, lineno)))\n return tag_list\n\nif 'cms' in global_settings.INSTALLED_APPS:\n class LatestBookmarksPlugin(CMSPlugin):\n\n latest_entries = models.IntegerField(default=5, help_text=('The number of latests entries to be displayed.'))\n tags = models.ManyToManyField('taggit.Tag', blank=True, help_text=('Show only the bookmarks tagged with chosen tags.'))\n\n def __unicode__(self):\n return str(self.latest_entries)\n\n def copy_relations(self, oldinstance):\n self.tags = oldinstance.tags.all()\n\n def get_bookmarks(self):\n posts = BookmarkInstance.objects.all().filter(user__is_staff=True,privacy_level='pub').order_by('-saved')\n # print('Printing get_bookmarks')\n # for post in posts:\n # print(post)\n\n tags = list(self.tags.all())\n if tags:\n posts = posts.filter(tags__in=tags)\n return posts[:self.latest_entries]\n\n\ndef get_bookmarks_count(user=None):\n try:\n if user:\n return BookmarkInstance.objects.filter(user = user).count()\n else:\n return BookmarkInstance.objects.count()\n except:\n return None\n\n\ndef 
get_user_bookmark(user,privacy = None):\n try:\n bookmark_instance = None\n if privacy:\n bookmark_instance = BookmarkInstance.objects.filter(user_id = user,privacy_level = privacy)\n else:\n bookmark_instance = BookmarkInstance.objects.filter(user_id = user)\n if bookmark_instance:\n return bookmark_instance\n else:\n return None\n except:\n return None\n\ndef get_bookmark(url):\n try:\n bookmark_instance = BookmarkInstance.objects.filter(bookmark__url = url)[0]\n if bookmark_instance:\n return bookmark_instance\n else:\n return None\n except:\n return None\n","repo_name":"PirateLearner/pi","sub_path":"bookmarks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
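The two fetch helpers at the end of the record above use filter(...)[0] guarded by a bare except; Django's QuerySet API has dedicated methods for this. A sketch (not from the original repo) of the same lookups:

def get_bookmark(url):
    # .first() returns None on an empty queryset instead of raising IndexError
    return BookmarkInstance.objects.filter(bookmark__url=url).first()

def has_bookmarks(user):
    # .exists() avoids fetching rows when only presence matters
    return BookmarkInstance.objects.filter(user=user).exists()

{"seq_id":"20372138827","text":"# Purpose:\tObtain an idea of how closely related copepod species are within the genus Caligus.\r\n#\t\t\tBy having an idea of the phylogenetic relationships in this genus, it may help to\r\n#\t\t\tdetermine if other observed correlations between species (distribution, host specificity,\r\n#\t\t\tbehavior, etc.) may be partially explained by how closely related the species are.\r\n\r\n#so I can use regular expressions\r\nimport re\r\n\r\n# Obtain copepod genetic sequences from NCBI in FASTA format\r\n\r\n# Reformat data so each title and DNA sequence is on a single, individual line\r\n\r\n# LINEARIZE SEQUENCES\r\n#create file to hold newly formatted information\r\ndef line_seq(filename):\r\n with open(filename) as seqfile:\r\n name = \"\"\r\n seqs = \"\"\r\n for line in seqfile:\r\n line = line.strip()\r\n if line.startswith(\">\"):\r\n if name != \"\":\r\n yield name, seqs\r\n seqs = \"\"\r\n name = line.lstrip(\">\")\r\n else:\r\n seqs = seqs + line\r\n\t\t\t\t\r\n#turn into dict\r\n\r\ndna_dict = dict(line_seq(\"Caligus_seq.fasta\"))\r\n\r\n#remove unwanted and duplicate sequences\r\n#open new dict\r\nnodup = {}\r\n#reads each dictionary item and adds it to \"nodup\" dictionary if new sequence\r\nfor name,value in dna_dict.items():\r\n if value not in nodup.values():\r\n nodup[name] = value\r\n\t\t\r\n\t\t\r\nfor name,value in nodup.items():\r\n f=open('dicttest.txt','a')\r\n f.write(name + \"\\t\" + value + \"\\n\")\r\n f.close()\r\n\t\t\t\r\n# Cytochrome C mitochondrial sequences only\r\n# Strip excess title information so only species names and sequences remain\r\n\r\ninfile = open('dicttest.txt', 'r')\r\noutfile = open('dicttest_strip.txt', 'w')\r\n\r\nfor lines in infile:\r\n ll = lines\r\n pattern = r'(^g.*)(Caligus\\s\\w+)(.+?drial)(\\s+)([AC]T[A+|G+|C+|T+]{526})'\r\n aa = re.search(pattern, str(ll))\r\n\t#if it finds a match, stores name and sequence in groups 2 and 5\r\n if aa != None:\r\n aa2 = aa.group(2)\r\n aa5 = aa.group(5)\r\n\t\t#writes name and sequence separated by a tab\r\n outfile.write(str(aa2) + '\\t' +str(aa5))\r\n\t\t#puts each species and sequence on a new line\r\n outfile.write('\\n')\r\n\t#if no match found, continues to next line\t\r\n else:\r\n continue\r\n\r\n#close files to write\t\t\r\ninfile.close()\r\noutfile.close()\r\n\r\n#REMOVE SPACES IN SPECIES NAMES\r\n# Function to remove spaces in species names\r\n\r\nfile2 = open('dicttest_nospace.txt', 'w')\r\n\r\ndef strip(file1):\r\n for line in file1:\r\n ll = line\r\n pattern = r'(\\w+)(\\s)(\\w+)\\t([ACTG]+)'\r\n aa = re.search(pattern, str(ll))\r\n if aa != None:\r\n aa1 = aa.group(1)\r\n aa3 = aa.group(3)\r\n aa4 = aa.group(4)\r\n file2.write(str(aa1)+ '_' + str(aa3) + '\t' + str(aa4))\r\n 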
file2.write('\\n')\r\n else:\r\n continue\r\n\t\t\r\nfile1 = open('dicttest_strip.txt', 'r')\r\n\r\nstrip(file1)\r\n\r\n# Create nexus file\r\n\r\n \r\n\r\n\t# Write in blocks of nexus directions\r\n # Pre-block of required information at start of file\r\n # Analysis block to run mcmc at end of file\r\n # Insert Caligus species names and sequences between nexus code blocks\r\n\t# Ensure species names and sequences are aligned\r\n#align sequences\r\noutput = open('dicttest.nex', 'a')\r\n\r\ndef align(filename):\r\n with open(filename) as f:\r\n for line in f:\r\n data = line.split() # Splits on whitespace\r\n output.write('{0[0]:<25}{0[1]:<528}'.format(data)) #aligns sequences after 25 characters on each line\r\n output.write('\\n')\r\n output.close()\r\n\t\t\t\r\nalign('dicttest_nospace.txt')\r\n\r\n# Send nexus file to Mrbayes for execution\r\n # Run sumt and sump commands\r\n# Send tree information to R to construct phylo-tree","repo_name":"mgamble86/FinalProject","sub_path":"main_code_mar16.py","file_name":"main_code_mar16.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2477855419","text":"import random\nfrom util import *\nfrom ansi import *\nimport numpy as np\nimport ipdb as pdb\nimport atexit\nimport copy\nimport curses\nfrom time import sleep\n\nclass Erasure:\n\tdef __init__(self, co, timestep):\n\t\tself.co = co\n\t\tself.timestep = timestep\n\ndef sign0(v):\n\treturn v and (1, -1)[v < 0]\n\nclass World:\n\tt_bot = 0\n\tt_empty = 1\n\tt_human = 2\n\tt_block = 3\n\tt_fixedblock = 4\n\tt_horizon = 5\n\tt_nearground = 6\n\n\tdef __init__(self, size=(30,80,80), tsize=(None,None)):\n\t\t# Gets terminal width for missing tsize values (y,x)\n\t\tself.size = size\n\t\t#self.a = [[[0] * self.xs for i in range(self.ys)] for j in range(self.zs)]\n\t\tself.a = np.zeros(size)\n\t\ttxs, tys = get_linux_terminal()\n\n\t\tself.tsize = [None,None]\n\t\tif tsize[0] == None: self.tsize[0] = tys\n\t\tif tsize[1] == None: self.tsize[1] = txs\n\n\t\tself.tyscale = .7 # Display world y in term height * tyscale\n\t\tself.objs = []\n\t\tself.status_lines = 15\n\t\tself.ui_initted = False\n\t\tself.trow_status = tys-self.status_lines\n\t\tself.timestep = 0\n\t\tself.erasures = []\n\t\tself.curses = None\n\t\tself.statuses = []\n\tdef update_tsize(self):\n\t\t#txs, tys = get_linux_terminal()\n\t\tself.tsize = list(self.curses.getmaxyx())\n\t\tself.trow_status = self.tsize[0]-self.status_lines\n\n\t\tself.wworldheight, self.wworldwidth = self.winworld.getmaxyx()\n\t\tself.wstatusheight, self.wstatuswidth = self.winstatus.getmaxyx()\n\n\t# Convenience properties, xs => size[2]\n\t@property\n\tdef xs(self): return self.size[2]\n\t@xs.setter\n\tdef xs(self, v): self.size[2] = v\n\n\t@property\n\tdef ys(self): return self.size[1]\n\t@ys.setter\n\tdef ys(self, v): self.size[1] = v\n\n\t@property\n\tdef zs(self): return self.size[0]\n\t@zs.setter\n\tdef zs(self, v): self.size[0] = v\n\n\tdef screensize(size=(None,None)): # Set or get (if both None)\n\t\tif not size[0] == None: self.tsize[0] = size[0]\n\t\telif not size[1] == None: self.tsize[1] = size[1]\n\t\telse: return self.tsize\n\tdef is_freespace(self, pos, size):\n\t\t#pf(pos, size)\n\t\tif pos[0]+size[0] >= self.zs \\\n\t\t or pos[1]+size[1] >= self.ys \\\n\t\t or pos[2]+size[2] >= self.xs:\n\t\t\treturn False\n\t\tfor tz in range(pos[0], pos[0]+size[0]+1):\n\t\t\tfor ty in range(pos[1], pos[1]+size[1]+1):\n\t\t\t\tfor tx in range(pos[2], 
pos[2]+size[2]+1):\n\t\t\t\t\tif self.a[tz][ty][tx]: # Something here\n\t\t\t\t\t\treturn False\n\t\treturn True # Found nothing in this area\n\tdef find_freespace(self, size): # Still using random in a loop\n\t\t#pf(\"size\", size)\n\t\t# size: (z,y,x)\n\t\t# Raises MemoryError() if no room for size\n\t\tfor tries in range(0, 1000): # Try 1000 times to find a free space\n\t\t\tx = random.randint(0, self.size[2])\n\t\t\ty = random.randint(0, self.ys)\n\t\t\tz = 0\n\t\t\tif self.is_freespace((z,y,x), size): return (z,y,x)\n\t\traise MemoryError(\"No free space in world for object of size:\", size)\n\tdef add_object(self, o, pos=None):\n\t\t#pf(\"o.size\", o.size)\n\t\tif pos: o.pos = np.array(pos, dtype=np.float64)\n\t\telse: o.pos = np.array(self.find_freespace(o.size), dtype=np.float64)\n\t\tself.objs.append(o)\n\t\to.windex = len(self.objs)\n\t\to.world = self\n\t\t#pf(\"Added object:\", o.type(), o, \"at pos\", o.pos)\n#\t\tif o.vel[i] - o2.vel[i] < 0:\n#\t\t\t#o.pos[i] = o2.pos[i]+o2.size[i]\n#\t\t\tpass\n#\t\telif o.vel[i]>0:\n#\t\t\t#gyx(co[1], co[0])\n#\t\t\t#pfpl(red, \"*\", rst)\n#\t\t\tpass\n#\t\t\t#o.pos[i] = o2.pos[i]-o2.size[i]\n\tdef init_ui(self):\n\t\tself.ui_initted = True\n\t\t#pfpl(chide, rst)\n\t\t#cls()\n\t\tself.curses = curses.initscr()\n\t\tcurses.start_color()\n\t\tcurses.noecho()\n\t\t#curses.cbreak()\n\t\tcurses.curs_set(0)\n\t\tself.curses.keypad(1)\n\n\t\tself.tsize = list(self.curses.getmaxyx())\n\n\t\twworldheight = self.tsize[0]-self.status_lines\n\t\twworldwidth = self.tsize[1]\n\t\twstatusheight = self.status_lines\n\t\twstatuswidth = self.tsize[1]\n\t\twstatuslocy = self.tsize[0]-self.status_lines\n\t\twstatuslocx = 0\n\t\tself.wworldheight = wworldheight\n\t\tself.wworldwidth = wworldwidth\n\t\tself.wstatusheight = wstatusheight\n\t\tself.wstatuswidth = wstatuswidth\n\n\t\t#eprint(\"winworld: {},{} 0,0\".format(wworldheight,wworldwidth))\n\t\t#eprint(\"winstatus: {},{} {},{}\".format(wstatusheight,wstatuswidth,\n\t\t\t#wstatuslocy,wstatuslocx))\n\t\tself.winworld = curses.newwin(wworldheight,wworldwidth, 0,0)\n\t\tself.winstatus = curses.newwin(wstatusheight,wstatuswidth, \n\t\t\twstatuslocy,wstatuslocx)\n\n\t\tself.winstatus.scrollok(True)\n\t\tself.winworld.refresh()\n\t\tself.winstatus.refresh()\n\n\t\tself.winworld.nodelay(1)\n\t\tself.winstatus.nodelay(1)\n\n\t\tcurses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)\n\t\tcurses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)\n\t\tcurses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n\t\tcurses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK)\n\t\tcurses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)\n\t\tcurses.init_pair(6, curses.COLOR_CYAN, curses.COLOR_BLACK)\n\t\tcurses.init_pair(7, curses.COLOR_WHITE, curses.COLOR_BLACK)\n\t\tself.c_red = curses.color_pair(1)\n\t\tself.c_gre = curses.color_pair(2)\n\t\tself.c_yel = curses.color_pair(3)\n\t\tself.c_blu = curses.color_pair(4)\n\t\tself.c_mag = curses.color_pair(5)\n\t\tself.c_cya = curses.color_pair(6)\n\t\tself.c_whi = curses.color_pair(7)\n\t@atexit.register\n\tdef restore_ui():\n\t\t#pfl(cshow)\n\t\t#curses.keypad(0)\n\t\tcurses.nocbreak()\n\t\tcurses.echo()\n\t\tcurses.curs_set(1)\n\t\tcurses.endwin()\n\t\tpf(\"Reset 1\")\n\t\tos.system(\"stty sane\")\n\tdef restore_ui(self):\n\t\tcurses.nocbreak()\n\t\tcurses.echo()\n\t\tcurses.curs_set(1)\n\t\tcurses.endwin()\n\t\tself.ui_initted = False\n\t\tpf(\"Reset 2\")\n\t\tos.system(\"stty sane\")\n\tdef log(self, s):\n\t\tself.statuses.append(s)\n\t\tif len(self.statuses) 
>= self.status_lines:\n\t\t\tdel self.statuses[0]\n\t\t\t#self.winstatus.move(0,0)\n\t\t\t#self.winstatus.deleteln()\n\t\t#self.winstatus.move(len(self.statuses), 0)\n\t\t#self.winstatus.clrtoeol()\n\t\tself.winstatus.addstr(s + \"\\n\")\n\t\tself.winstatus.refresh()\n\tdef putatyx(self, co, s, attr):\n\t\t#self.\n\t\treturn\n\tdef erase_erasures(self):\n\t\tnewlist = []\n\t\tfor i in range(len(self.erasures)):\n\t\t\te = self.erasures[i]\n\t\t\tif e.timestep > self.timestep:\n\t\t\t\tself.winworld.addstr(e.co[0], e.co[1], \" \")\n\t\t\telse:\n\t\t\t\tnewlist.append(e)\n\t\tself.erasures = newlist\n\tdef draw(self):\n\t\tosort = sorted(self.objs, key=lambda o: o.pos[1], reverse=True)\n\t\tself.erase_erasures()\n\t\t# Erase old positioned objects\n\t\tfor o in osort:\n\t\t\tif o.oldpos is not None:\n\t\t\t\tcoo = self.world_co_to_screen(o.oldpos)\n\t\t\t\tco = self.world_co_to_screen(o.pos)\n\t\t\t\tif coo != co:\n\t\t\t\t\tself.draw_object(o, erase=True, manpos=o.oldpos)\n\t\t#self.log(\"Timestep: {}\".format(self.timestep))\n\t\t# Draw objects\n\t\tfor o in osort:\n\t\t\tself.draw_object(o)\n\t\t\t# Draw collisions\n\t\t\t# This is pretty slow, comparing all objects to each other\n\t\t\tfor o2 in self.objs:\n\t\t\t\tif o2 == o: continue\n\t\t\t\tif o.typ == o2.typ:\n\t\t\t\t\tif o.fixed: continue\n\t\t\t\thitside = self.overlap_obj(o, o2)\n\t\t\t\to.hitside = hitside\n\t\t\t\tif hitside is not None: # If overlap\n\t\t\t\t\tself.draw_collision(o, hitside)\n\t\t\t\t\to.handle_overlap(o2, hitside)\n\t\t\t\t\tpass\n\t\t#if not self.timestep % 10:\n\t\tself.winworld.refresh()\n\n\tdef step(self):\n\t\tself.timestep += 1\n\t\toi = -1\n\t\tfor o in self.objs:\n\t\t\tif o.fixed: continue\n\t\t\toi += 1\n\t\t\to.step()\n\t\t\t#self.log(\"Newpos: {}\".format(o.pos))\n\t\t\t#self.log(\"Oldpos 2: {}\".format(oldobj.pos))\n\t\t\tfor i in range(3):\n\t\t\t\tif o.vel[i]<0 and o.pos[i]<0:\n\t\t\t\t\to.pos[i] = 0\n\t\t\t\t\to.vel[i] = 0\n\t\t\t\tif o.vel[i]>0 and o.pos[i]+o.size[i]-1 >= self.size[i]:\n\t\t\t\t\to.pos[i] = self.size[i]-o.size[i]\n\t\t\t\t\to.vel[i] = 0\n\t\t\t#eprint(\"poso new: {} -> {}\\n\".format( o.oldpos, o.pos))\n\tdef world_co_to_screen(self, co, obj=None): # Project along y orthogonally\n\t\tty,tx = self.winworld.getmaxyx()\n\n\t\tsx = int(tx * (co[2] / self.xs))\n\t\tsy = ty - 1 - \\\n\t\t\tint(ty * (co[0] / self.zs)) - \\\n\t\t\tint(ty * (co[1] / self.ys) * self.tyscale)\n\t\t#self.log(\"ty,tx {},{} co:{}, sy,sx {},{}\".format(ty,tx,co, sy, sx))\n\t\treturn sy, sx\n\t\t\n\tdef draw_object(self, o, erase=False, manpos=None, color=None): # or Manual position\n\t\tif color is None: color = self.c_whi\n\n\t\tpos = o.pos if manpos is None else manpos\n\n\t\tif o.oldpos is None:\n\t\t\t\to.oldpos = np.array(pos)\n\t\t\t\t#eprint(\"Initial update: {}\".format(pos))\n\n\t\tco = self.world_co_to_screen(pos, obj=o)\n\t\tif (o.oldpos != pos).any():\n\t\t\t\to.oldpos = np.array(pos)\n\t\t\t\t#eprint(\"Secondary update: {}\".format(pos))\n\n\t\theight = len(o.pic)\n\t\tfor y in range(0, len(o.pic)):\n\t\t\t#if o.typ == self.t_human: pdb.set_trace()\n\t\t\t#if o.typ != t_nearground and o.typ != t_horizon:\n\t\t\t#\tself.log(\"[\" + str(self.timestep) + \"] Obj: \" + str(o) + \" at \" + str(co[0]+1+y))\n\n\t\t\t# GET COORDINATES OF TOP LEFT\n\t\t\t# -height+1 to start at the top\n\t\t\t#self.log(\"Co {},{}\".format(co[0], co[1]))\n\t\t\tty = co[0]-height+y\n\t\t\t# Don't round up with +.5 cuz we're not at 0, but on the first char\n\t\t\ttx = co[1] - int(o.size[2]/2)\n\t\t\t'''\n\t\t\tif o.typ 
== self.t_nearground and tx > 80:\n\t\t\t\tos.system(\"stty sane\")\n\t\t\t\tpf(\"o pos: {}\".format(o.pos))\n\t\t\t\tpf(\"ty,x {},{}\".format(ty, tx))\n\t\t\t\tpdb.set_trace()\n\t\t\t'''\n\n\t\t\tfor x in range(0, len(o.pic[y])):\n\t\t\t\t# Don't print spaces (just advance right)\n\t\t\t\tif ty >= 0 and ty < self.wworldheight and \\\n\t\t\t\t\t\ttx+x >= 0 and tx+x < self.wworldwidth:\n\t\t\t\t\tself.log(\"Adding @ {},{}\".format(ty, tx+x))\n\t\t\t\t\tif erase:\n\t\t\t\t\t\tself.winworld.addstr(ty, tx+x, \" \")\n\t\t\t\t\telse:\n\t\t\t\t\t\tif o.pic[y][x] == ' ': pass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.winworld.addstr(ty, tx+x, o.pic[y][x])\n\t\t\t\t\tif ty == co[0] and tx == co[1]:\n\t\t\t\t\t\tself.winworld.addstr(ty, tx, '@', self.c_cya)\n\t\t\t#gyx(co[0]-1, co[1])\n\t\t\t#pfpl(yel, o.windex, rst)\n\n\t\t\t#gyx(co[0]-2, co[1])\n\t\t\t#pfpl(yel, \"@\", rst)\n\t\tif o.typ == self.t_human:\n\t\t\tself.winworld.refresh()\n\tdef draw_collision(self, o, hitside, erase=None):\n\t\tco = self.world_co_to_screen(o.pos)\n\t\txsign = sign0(hitside[2])\n\t\tysign = sign0(hitside[1])\n\t\tif hitside[1] and hitside[2]: # Hit on z side (y on screen)\n\t\t\tty = int(co[0] + ysign*o.size[1])\n\t\t\ttx = int(co[1] + xsign*o.size[2])\n\t\telif hitside[1]: # Just on y\n\t\t\tty = int(co[0] + ysign*o.size[1])\n\t\t\ttx = co[1]\n\t\telif hitside[2]: # Just on x\n\t\t\tty = int(co[0] + ysign*(o.size[1]/2 + .5))\n\t\t\ttx = int(co[1] + xsign*(o.size[2]/2 + .5))\n\n\t\t#ty = int(co[0] + hitside[0]*o.size[0]/2)\n\t\t#tx = int(co[1] + hitside[2]*o.size[2]/2)\n\t\t#ty = int(co[0]-1)\n\t\t#tx = int(co[1])\n\t\t#self.log(\"Hit: {} coy.x: {} {}\".format(hitside, co[0], co[1]))\n\t\t#self.log(\"Hit: {} coy.x: {} {}\".format(hitside, ty, ty))\n\t\t#pf(\"Hit:\", hitside, \"ty,x:\", ty, tx)\n\t\t#pf(\"\")\n\t\t#eprint(\"tyx:{},{} tsize:{} h:{}\".format(ty,tx, self.tsize, self.wworldheight))\n\t\tif ty >= 0 and tx >= 0 and ty < self.wworldheight-1 and tx < self.wworldwidth:\n\t\t\tchar = \"*\"\n\t\t\tself.winworld.addstr(ty, tx, char, self.c_red)\n\t\t\tself.add_timed_erase((ty,tx), 1)\n\tdef pyxr(y, x, s):\n\t\tself.winworld.addstr(y, x, s)\n\tdef add_timed_erase(self, co, ticks):\n\t\tself.erasures.append(Erasure(co, self.timestep + ticks))\n\tdef wref(self):\n\t\tself.winworld.refresh()\n\tdef overlap_obj(self, o, o2):\n\t\t# Returns None if false, else hitside = [z,y,x] <= {-1,0,1}+\n\t\t# Where -1 means the negative side, while 0 means we're in an overlap\n\t\t# (...and the collision side is therefore unknown, because of our\n\t\t# simplistic step analysis of collision)\n\t\tocount = 0\n\t\thitside = [0,0,0]\n\t\t#self.log(\"op:{} os:{} || o2p:{} o2s{}\".format(o.pos, o.size, o2.pos, o2.size))\n\t\tfor i in range(3):\n\t\t\top = o.pos[i] # Position of object in dimension i\n\t\t\to2p = o2.pos[i]\n\t\t\tosiz = o.size[i]/2 # Size of obj in dimension i\n\t\t\to2siz = o2.size[i]/2\n\t\t\tov = o.vel[i]\n\t\t\to2v = o2.vel[i]\n\t\t\tdv = ov-o2v\n\t\t\tif i == 0:\n\t\t\t\top += osiz\n\t\t\t\to2p += o2siz\n\t\t\tif op-osiz >= o2p-o2siz and op+osiz <= o2p+o2siz: # o is Enclosed in this axis\n\t\t\t\tocount += 1\n\t\t\t\thitside[i] = 0\n\t\t\telif op+osiz >= o2p-o2siz and op-osiz < o2p+o2siz: # Overlap o right\n\t\t\t\tocount += 1\n\t\t\t\thitside[i] = (o2p+o2siz)-(op-osiz) if dv else 0\n\t\t\telif op-osiz <= o2p+o2siz and op+osiz > o2p-o2siz: # Overlap o left\n\t\t\t\tocount += 1\n\t\t\t\thitside[i] = (o2p-o2siz)-(op+osiz) if dv else 0\n\n\t\t# Now take the minimum overlap area, because [hopefully] we just\n\t\t# entered 
that side. Ideally we should examine the velocity vector\n\t\t# directions, and find the distance traveled into the object that way\n\t\tif ocount != 3: return None # Must overlap on all axii, or we aren't overlapped\n\t\telse: # Here we're overlapped\n\t\t\tif not all(hitside): # Fully enclosed\n\t\t\t\t\tfor i in range(3):\n\t\t\t\t\t\t\thitside[i] = min(abs(o.pos[i]-o2.pos[i]), abs(o2.pos[i]-o.pos[i]))\n\t\t\t\t\treturn hitside\n\t\t\t # Here just a normal overlap\n\t\t\t#self.log(\"hitside:: {}\".format(hitside))\n\t\t\tminoverlap = min([abs(x) for x in hitside if x != 0])\n\t\t\t#self.log(\"hitside1 {}\".format(hitside))\n\t\t\thitside = [0 if abs(x)>minoverlap else x for x in hitside]\n\t\t\t#self.log(\"hitside2 {}\".format(hitside))\n\n\t\t\tself.draw_object(o, color=self.c_red)\n\t\t\tself.draw_object(o2, color=self.c_mag)\n\t\t\tself.wref()\n\t\t\tsleep(.5)\n\n\t\t\treturn hitside\n\nclass Size: # Unused\n\tdef __init__(x, y, z):\n\t\traise ValueError(\"Unimplemented/Incomplete Size class\")\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.z = z\n\tdef x(self):\n\t\treturn x\n\tdef y(self):\n\t\treturn y\n\tdef z(self):\n\t\treturn z\n\nclass Object:\n\t#def __init__(self, **kwargs):\n\tdef __init__(self, typ=None, fixed=False, size=(1,1,1), pic=\"x\",\n\t\t\tweight=1, animated=False, hitside=None):\n\t\tself.pos = None # np array\n\t\tself.__vel = np.array([0,0,0], dtype=np.float64)\n\t\tself.acc = np.array([0,0,0], dtype=np.float64)\n\t\tself.typ = typ\n\t\tself.size = size\n\t\tself.pic = pic\n\t\tself.weight = np.array(weight, dtype=np.float64)\n\t\tself.fixed = fixed\n\t\tself.animated = animated # If object is self-animating\n\t\tself.hitside = hitside\n\t\tself.oldpos = None\n\tdef type(self):\n\t\tif self.typ == World.t_human: return \"Human\"\n\t\tif self.typ == World.t_fixedblock: return \"FixedBlock\"\n\t\tif self.typ == World.t_block: return \"Block\"\n\t\tif self.typ == World.t_bot: return \"Bot\"\n\t\tif self.typ == World.t_horizon: return \"Horizon\"\n\t\tif self.typ == World.t_nearground: return \"NearGround\"\n\t\treturn \"UNKNOWN-TYPE\"\n\tdef step(self): return\n\t@property\n\tdef vel(self): return self.__vel\n\t@vel.setter\n\tdef vel(self, v): self.__vel = np.array(v, dtype=np.float64)\n\tdef handle_overlap(self, o2, hitside): return\n\nclass Human(Object):\n\tdef __init__(self):\n\t\tself.health = 1\n\t\tself.velmax = np.array([2,2,2], dtype=np.float64)\n\t\tself.accmax = np.array([0,.02,.02], dtype=np.float64)\n\t\tself.acc_likelihood = .2\n\t\tsc_pic = [\n\t\t\t\" o \",\n\t\t\t\"-|-\",\n\t\t\t\"/ \\\\\",\n\t\t]\n\t\tself.animated = 1\n\t\tsuper().__init__(typ=World.t_human, size=(3,3,3), pic=sc_pic, weight=2)\n\tdef handle_overlap(self, o2, hitside):\n\t\tfor i in range(3):\n\t\t\tif hitside[i]:\n\t\t\t\tif max(abs(self.vel[i]), abs(hitside[i])) > .5:\n\t\t\t\t\tpass\n\t\t\t\t\t#self.world.log(\" self vel[{}] {} > hitside {}\".format(i, self.vel[i], hitside[i]))\n\t\t\t\t\t#pdb.set_trace()\n\t\t\t\t#self.vel[i] = -self.vel[i]\n\t\t\t\t#self.pos[i] += hitside[i]\n\t\t\t\t#self.world.log(\"Hitside: \" + hitside[i])\n\n\tdef step(self):\n\t\tif random.random() < self.acc_likelihood * self.health:\n\t\t\tself.acc = np.array([random.uniform(-v,v) for v in self.accmax],\n\t\t\t\tdtype=np.float64)\n\t\t\tself.vel += self.acc\n\t\tself.pos += self.vel * self.health\n\nclass FixedBlock(Object):\n\tdef __init__(self):\n\t\tsc_pic = [\n\t\t\t\"############\",\n\t\t\t\"############\",\n\t\t\t\"############\",\n\t\t]\n\t\tsuper().__init__(typ=World.t_fixedblock, size=(3,3,12), 
pic=sc_pic, fixed=True)\n\tdef step(self): return\n\nclass Horizon(Object):\n\tdef __init__(self):\n\t\tpic = \"------\"\n\t\tsc_pic = [\n\t\t\tpic,\n\t\t]\n\t\tsuper().__init__(typ=World.t_horizon, size=(1,1,len(pic)), pic=sc_pic, fixed=True)\n\tdef step(self): return\n\nclass NearGround(Object):\n\tdef __init__(self):\n\t\tpic = \"======\"\n\t\tsc_pic = [\n\t\t\tpic,\n\t\t]\n\t\tsuper().__init__(typ=World.t_nearground, size=(1,1,len(pic)), pic=sc_pic, fixed=True)\n\tdef step(self): return\n\n\nclass Block(Object):\n\tdef __init__(self):\n\t\tsc_pic = [\n\t\t\t\"[#]\",\n\t\t]\n\t\tsuper().__init__(typ=World.t_block, size=(1,3,3), pic=sc_pic, weight=1)\n\tdef step(self): return\n\nclass Bot(Object):\n\tdef __init__(self):\n\t\tself.health = 1\n\t\tself.velmax = np.array([2,2,2], dtype=np.float64)\n\t\tself.accmax = np.array([0,.02,.02], dtype=np.float64)\n\t\tself.acc_likelihood = .2\n\t\tsc_pic = [\n\t\t\t\".O.\",\n\t\t\t\"o o\",\n\t\t]\n\t\tsuper().__init__(typ=World.t_bot, size=(2,3,3), pic=sc_pic, weight=1)\n\t\tself.animated = 1\n\tdef step(self):\n\t\tif random.random() < self.acc_likelihood * self.health:\n\t\t\tself.acc = np.array([\n\t\t\t\t\trandom.uniform(-self.accmax[0], self.accmax[0]),\n\t\t\t\t\trandom.uniform(-self.accmax[1], self.accmax[1]),\n\t\t\t\t\trandom.uniform(-self.accmax[2], self.accmax[2]),\n\t\t\t\t], dtype=np.float64)\n\t\t\tself.vel += self.acc\n\t\tself.pos += self.vel * self.health\n\nclass Goal(Object):\n\tdef __init__(self):\n\t\tsc_pic = [\n\t\t\t\"/~\\\\\",\n\t\t\t\"\\\\~/\",\n\t\t]\n\t\tsuper().__init__(typ=World.t_bot, size=(3,3,3), pic=sc_pic, weight=1)\n\n# vim:ts=2 ai sw=2\n","repo_name":"jaggzh/prime-directive","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":16448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
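An illustrative reduction (not from the repo) of the axis-by-axis test that World.overlap_obj in the record above implements: two axis-aligned boxes overlap only if their intervals overlap on every axis, and the collision side is taken as the axis with the smallest penetration depth.

def aabb_hit(pos_a, size_a, pos_b, size_b):
    hit = []
    for i in range(3):
        a_min, a_max = pos_a[i], pos_a[i] + size_a[i]
        b_min, b_max = pos_b[i], pos_b[i] + size_b[i]
        if a_max <= b_min or b_max <= a_min:
            return None  # separated on this axis -> no collision
        hit.append(min(a_max - b_min, b_max - a_min))  # penetration depth
    axis = min(range(3), key=lambda i: hit[i])
    return axis, hit[axis]

print(aabb_hit((0, 0, 0), (2, 2, 2), (1, 1, 1.5), (2, 2, 2)))  # (2, 0.5)

{"seq_id":"14185165640","text":"from utils.api import APIView\nfrom utils.exceptions import SettingNotSet\n\nfrom .serializers import SettingSerilizer\nfrom .utils import AppSetting\n\n\nclass SettingView(APIView):\n def post(self, request):\n serializer = SettingSerilizer(data=request.data)\n if not serializer.is_valid():\n self.error(serializer.errors)\n key = serializer.validated_data['key']\n value = AppSetting.get_default(key)\n if not value:\n raise SettingNotSet\n return self.success(value)\n","repo_name":"ZhaoQi99/DevOps-Docker","sub_path":"qops_server/setting/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"1310398107","text":"import os\r\nimport sys\r\nfrom pickletools import int4\r\nimport time\r\n\r\nFILE_PATH = '../hotstuff/benchmark/logs/'\r\n\r\nproposer_list = []\r\nproof_of_attempt_of_safety_attack = []\r\nproof_of_attempt_of_non_safety_attack = []\r\nvoter_list = []\r\nhandler_list = []\r\nprocesser_list = []\r\n\r\nLIVENESS_OR_ACCIDENT = \"This node's current action is either an attempted liveness attack or just caused by a timeout; ignored for this project since it only concerns safety attacks\"\r\nLIVENESS_ATTACK_TWO_PT_FOUR = \"This node is attempting liveness attack 2.4: in the current round it proposes a proposal with an even bigger round number to try to overflow\"\r\nSAFETY_ATTACK_1 = \"This node is performing attack 1 that proposes multiple proposals with different QCs but the same round number\"\r\nSAFETY_ATTACK_2_2 = \"This node is performing attack 2.2 that proposes with a round number less than the current round 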
number and was not the leader for that round\"\r\nSAFETY_ATTACK_2_1 = \"This node is performing attack 2.1 that with less round number than current round number and was the leader for that round but proposes a different QC\"\r\n# As normal detection in writeup\r\nSAFETY_ATTACK_3_CORRECT_VOTE_REPORT = \"This node is performing attack 3.1 that it votes for proposals in attack 1, and it is reporting CORRECT voting information\"\r\n# As optimization for avoiding malicious nodes don't show vote message or show wrong vote message to cause false negative\r\nSAFETY_ATTACK_3_NO_OR_WRONG_VOTE_REPORT = \"This node is performing attack 3.2 that it votes for proposals in attack 1, and it is reporting NO OR WRONG voting information\"\r\n# Below is similar to above\r\nSAFETY_ATTACK_4_2_CORRECT_VOTE_REPORT = \"This node is performing attack 4.2.1 that it votes for proposals in attack 2.2, and it is reporting CORRECT handling information\"\r\nSAFETY_ATTACK_4_2_NO_OR_WRONG_VOTE_REPORT = \"This node is performing attack 4.2.2 that it votes for proposals in attack 2.2, and it is reporting NO OR WRONG handling information\"\r\nSAFETY_ATTACK_4_1_CORRECT_VOTE_REPORT = \"This node is performing attack 4.1.1 that it votes for proposals in attack 2.1, and it is reporting CORRECT processing information\"\r\nSAFETY_ATTACK_4_1_NO_OR_WRONG_VOTE_REPORT = \"This node is performing attack 4.1.2 that it votes for proposals in attack 2.1, and it is reporting NO OR WRONG processing information\"\r\n\r\ncommittedRoundToQc = {}\r\nlivenessRoundToQc = {}\r\nsafetyOneRoundToQc = {}\r\nsafetyattackRound = set()\r\n\r\nattack_1_reporter_to_roundqc = {}\r\nattack_2_reporter_to_roundqc = {}\r\nattack_liveness_reporter_to_roundqc = {}\r\n\r\nclass Node():\r\n def __init__(self, name:str, round_number:int, qc_round_number:int):\r\n self.name = name\r\n self.round_number = round_number\r\n self.qc_round_number = qc_round_number\r\n\r\nclass Proposer(Node):\r\n def __init__(self, name, round_number, qc_round_number):\r\n super().__init__(name, round_number, qc_round_number)\r\n \r\n def __eq__(self, __o: object) -> bool:\r\n if (isinstance(__o, Proposer)):\r\n return self.name == __o.name and self.round_number == __o.round_number\\\r\n and self.qc_round_number == __o.qc_round_number\r\n return False\r\n\r\nclass Handler(Node):\r\n def __init__(self, name, round_number, qc_round_number, leader=None):\r\n super().__init__(name, round_number, qc_round_number)\r\n self.leader=leader\r\n\r\nclass Processer(Node):\r\n def __init__(self, name, round_number, qc_round_number, leader=None):\r\n super().__init__(name, round_number, qc_round_number)\r\n self.leader=leader\r\n\r\nclass Voter(Node):\r\n def __init__(self, name, round_number, qc_round_number, leader=None):\r\n super().__init__(name, round_number, qc_round_number)\r\n self.leader=leader\r\n\r\nclass MaliciousNode(Node):\r\n def __init__(self, name, round_number, qc_round_number, attack_id, description):\r\n super().__init__(name, round_number, qc_round_number)\r\n self.attack_id = attack_id\r\n self.description = description\r\n def __eq__(self, __o: object) -> bool:\r\n if (isinstance(__o, MaliciousNode)):\r\n return self.name == __o.name and self.round_number == __o.round_number\\\r\n and self.qc_round_number == __o.qc_round_number and self.attack_id == __o.attack_id \\\r\n and self.description == __o.description\r\n return False\r\n\r\nclass MaliciousButDueToLivenessOrAccidentNode(Node):\r\n def __init__(self, name, round_number, qc_round_number, description):\r\n 
super().__init__(name, round_number, qc_round_number)\r\n self.description = description\r\n def __eq__(self, __o: object) -> bool:\r\n if (isinstance(__o, MaliciousButDueToLivenessOrAccidentNode)):\r\n return self.name == __o.name and self.round_number == __o.round_number\\\r\n and self.qc_round_number == __o.qc_round_number and self.description == __o.description\r\n return False\r\n\r\ndef addProposer(new_proposer):\r\n proposer_list.append(new_proposer)\r\n\r\n#https://www.tutorialspoint.com/How-to-sort-the-objects-in-a-list-in-Python#:~:text=How%20to%20sort%20the%20objects%20in%20a%20list,just%20pass%20in%20the%20reverse%20parameter%20as%20well.\r\ndef getObjKeyByRoundNumber(obj):\r\n return obj.round_number\r\n\r\ndef parseLog():\r\n node_files = [f_name for f_name in os.listdir(FILE_PATH)\\\r\n if f_name.startswith('node')]\r\n # First pass loop get all proposers for creating blocks\r\n for file in node_files:\r\n with open(FILE_PATH+file) as f:\r\n lines = f.readlines()\r\n node_name = ''\r\n for i in range(0, len(lines)):\r\n \r\n line_list = lines[i].split(' ')\r\n # print(line_list)\r\n # Assign node name\r\n if len(line_list) >= 7 and line_list[6] == 'successfully':\r\n node_name = line_list[5]\r\n #print(node_name)\r\n # Record created block info, or detect liveness/accident if duplicate\r\n if len(line_list) >= 6 and line_list[4] == 'Created' and line_list[5] == 'with':\r\n # Don't add duplicate, or add proposer and what is proposed\r\n \r\n if (len(proposer_list) == 0):\r\n addProposer(Proposer(node_name, int(line_list[8]), int(line_list[11])))\r\n #addCreateCommitInfo(node_name, int(line_list[8]), int(line_list[11]))\r\n else:\r\n if (Proposer(node_name, int(line_list[8]), int(line_list[11])) != proposer_list[-1]):\r\n addProposer(Proposer(node_name, int(line_list[8]), int(line_list[11])))\r\n #addCreateCommitInfo(node_name, int(line_list[8]), int(line_list[11]))\r\n else:\r\n detectLivenessOrAccident(node_name, int(line_list[8]), int(line_list[11]), LIVENESS_OR_ACCIDENT)\r\n # Second pass loop detect attacks or parse attacks related info to analysis further later\r\n for file in node_files:\r\n with open(FILE_PATH+file) as f:\r\n lines = f.readlines()\r\n node_name = ''\r\n for i in range(0, len(lines)):\r\n \r\n line_list = lines[i].split(' ')\r\n # print(line_list)\r\n # Assign node name\r\n if len(line_list) >= 7 and line_list[6] == 'successfully':\r\n node_name = line_list[5]\r\n \r\n # Parse Committed block Info\r\n if len(line_list) >= 7 and line_list[4] == 'Committed' and line_list[6] == 'with':\r\n committedRoundToQc[int(line_list[5][1:])] = int(line_list[8])\r\n \r\n # Detect Attack 1\r\n if len(line_list) >= 7 and line_list[4] == 'Attack' and line_list[5] == '1':\r\n # print(lines[i+2])\r\n # Sometimes there will be \"created bxx\" logged before safety check log\r\n\r\n # Following many while is for parsing correct info that ignores those randomly timeout message inserted\r\n pos = 2\r\n while len(lines[i+pos].split(' ')) < 8 or (lines[i+pos].split(' ')[6] != 'Safety' and lines[i+pos].split(' ')[7] != '1'):\r\n pos += 1\r\n locationSRO = pos\r\n while len(lines[i+pos].split(' ')) < 8 or (lines[i+pos].split(' ')[6] != 'Safety' and lines[i+pos].split(' ')[7] != '2'):\r\n pos += 1\r\n locationSRT = pos\r\n while len(lines[i+pos].split(' ')) < 5 or lines[i+pos].split(' ')[4] != \"Proposal's\":\r\n pos += 1\r\n locationAI = pos\r\n while len(lines[i+pos].split(' ')) < 5 or lines[i+pos].split(' ')[4] != \"I\":\r\n pos += 1\r\n locationReporter = pos\r\n '''if 
lines[i+2].split(' ')[4] == \"Created\":\r\n locationSRO += 2\r\n locationSRT += 2\r\n locationAI += 2\r\n locationReporter += 2'''\r\n checkSafetyRuleOne = lines[i+locationSRO].split(' ')[9].strip()\r\n checkSafetyRuleTwo = lines[i+locationSRT].split(' ')[9].strip()\r\n # Three scenarios:\r\n # 1. Safety Rule 1 true and Safety Rule 2 false: Liveness\r\n # 2. Safety Rule 1 false and Safety Rule 2 false: Safety Attack 1\r\n # 3. Other: Liveness Or Accident\r\n line_attack_info = lines[i+locationAI].split(' ')\r\n if checkSafetyRuleOne == \"true\" and checkSafetyRuleTwo == \"false\":\r\n detectLivenessOrAccident(line_attack_info[6].strip(), round_number=int(line_attack_info[9].strip()), \\\r\n qc_round_number=int(line_attack_info[17].strip()), description=LIVENESS_ATTACK_TWO_PT_FOUR)\r\n attackReporter(-1, lines[i+locationReporter].split(' ')[7].strip(), int(line_attack_info[9].strip()), int(line_attack_info[17].strip()))\r\n # Optimizing for improving false positive: sometimes honest nodes report but curr round - 1 = block round due to \r\n # concurrency issue that global round is updated but block is sent slower, thus consider this scenario as honest behavior \r\n elif checkSafetyRuleOne == \"false\" and checkSafetyRuleTwo == \"false\" and \\\r\n (int(line_attack_info[9].strip()) == int(line_attack_info[13].strip()) or int(line_attack_info[9].strip()) + 1 == int(line_attack_info[13].strip())):\r\n detectSafetyAttack(line_attack_info[6].strip(), int(line_attack_info[9].strip()), int(line_attack_info[17].strip()),\\\r\n 1, SAFETY_ATTACK_1)\r\n attackReporter(1, lines[i+locationReporter].split(' ')[7].strip(), int(line_attack_info[9].strip()), int(line_attack_info[17].strip()))\r\n\r\n # Detect attack 2\r\n if len(line_list) >= 7 and line_list[4] == 'Attack' and line_list[5] == '2' \\\r\n and line_list[6] == 'definitely':\r\n line_attack_info = lines[i+1].split(' ')\r\n detectSafetyAttack(line_attack_info[6].strip(), int(line_attack_info[9].strip()), int(line_attack_info[17].strip()),\\\r\n 2, SAFETY_ATTACK_2_2)\r\n attackReporter(2, lines[i+2].split(' ')[6].strip(), int(lines[i+1].split(' ')[9]), int(lines[i+1].split(' ')[17]))\r\n if len(line_list) >= 7 and line_list[4] == 'Attack' and line_list[5] == '2' \\\r\n and line_list[6] == 'potentially':\r\n \r\n # IMPORTANT: round check control false positive that\r\n # eg: when someone doing attack 2.1, there will be two proposers\r\n # P1(round=15,leader=L1,qc_round_number=14) which is honest,\r\n # And the malicious proposer, if it wants to do safety attack, it must\r\n # proposes a P with qc less than 14\r\n # Thus the other is P2(round=15,leader=Lmalicious, qc_round_number=0)\r\n # And in previous sorting P2 is in front of P1, and will be checked while\r\n # leaving P1 innocent. 
And if there's multiple malicious proposals Pn with\r\n # same round number, all of them other than the correct one will be checked\r\n # Limitation: there's edge case that if there's innocent proposal with \r\n # P1(round=15,leader=L1,qc_round_number=13) while malicious proposal with\r\n # P1(round=15,leader=L1,qc_round_number=14), then cannot detect\r\n\r\n # FIX BUG 2: ATTACK 2.2 NOT HADNLED BECAUSE DIDN'T SORT PROPOSER_LIST DYNAMICALLY\r\n proposer_list.sort(key=lambda x: (x.round_number))\r\n for proposer_index in range(0, len(proposer_list)-1):\r\n #print( proposer_list[proposer_index].round_number, proposer_list[proposer_index].qc_round_number,)\r\n if proposer_list[proposer_index].round_number != proposer_list[proposer_index+1].round_number:\r\n continue\r\n if proposer_list[proposer_index].qc_round_number == proposer_list[proposer_index+1].qc_round_number:\r\n continue\r\n # This is for 2.1. Rp = Rc, Lp != Lc\r\n # Optimization: Above can cause false porisitve when message , thus do above if statement\r\n '''if proposer_list[proposer_index].name != lines[i+2].split(' ')[6] and proposer_list[proposer_index].round_number == int(lines[i+2].split(' ')[9])\\\r\n :\r\n #print(\"attack 2.1 here\", lines[i+2])\r\n detectSafetyAttack(lines[i+2].split(' ')[6], int(lines[i+2].split(' ')[9].strip()), int(lines[i+2].split(' ')[17].strip()),\\\r\n 2, SAFETY_ATTACK_2_1)\r\n attackReporter(2, lines[i+3].split(' ')[7].strip(), int(lines[i+2].split(' ')[9]), int(lines[i+2].split(' ')[17]))\r\n continue'''\r\n try:\r\n if proposer_list[proposer_index].name == lines[i+2].split(' ')[6] and proposer_list[proposer_index].round_number == int(lines[i+2].split(' ')[9])\\\r\n and proposer_list[proposer_index].qc_round_number != int(lines[i+2].split(' ')[17]):\r\n \r\n detectSafetyAttack(lines[i+2].split(' ')[6], int(lines[i+2].split(' ')[9].strip()), int(lines[i+2].split(' ')[17].strip()),\\\r\n 2, SAFETY_ATTACK_2_1)\r\n attackReporter(2, lines[i+3].split(' ')[7].strip(), int(lines[i+2].split(' ')[9]), int(lines[i+2].split(' ')[17]))\r\n except:\r\n if proposer_list[proposer_index].name == lines[i+3].split(' ')[6] and proposer_list[proposer_index].round_number == int(lines[i+3].split(' ')[9])\\\r\n and proposer_list[proposer_index].qc_round_number != int(lines[i+2].split(' ')[17]):\r\n #print(\"attack 2.2 here\", lines[i+2])\r\n detectSafetyAttack(lines[i+3].split(' ')[6], int(lines[i+3].split(' ')[9].strip()), int(lines[i+3].split(' ')[17].strip()),\\\r\n 2, SAFETY_ATTACK_2_1)\r\n attackReporter(2, lines[i+4].split(' ')[7].strip(), int(lines[i+4].split(' ')[9]), int(lines[i+3].split(' ')[17]))\r\n \r\n # Record voter info to try detect malicious voters for attack 3 later\r\n if len(line_list) >= 6 and line_list[4] == \"I'm\" and line_list[5] == \"voter\":\r\n voter_list.append(Voter(name=node_name, round_number=int(line_list[14].strip()), \\\r\n qc_round_number=int(line_list[17].strip()), leader=line_list[20].strip()))\r\n # Record handler info to try detect malicious voters for attack 4.1 later\r\n if len(line_list) >= 6 and line_list[4] == \"I'm\" and line_list[5] == \"handler\":\r\n handler_list.append(Handler(name=node_name, round_number=int(line_list[14].strip()), \\\r\n qc_round_number=int(line_list[17].strip()), leader=line_list[20].strip()))\r\n # Record processor info to try detect malicious voters for attack 4.2 later\r\n if len(line_list) >= 6 and line_list[4] == \"I'm\" and line_list[5] == \"processor\":\r\n processer_list.append(Processer(name=node_name, 
round_number=int(line_list[14].strip()), \\\r\n qc_round_number=int(line_list[17].strip()), leader=line_list[20].strip()))\r\n \r\n\r\n # sort proposer by round number\r\n proposer_list.sort(key=lambda x: (x.name, x.round_number))\r\n proof_of_attempt_of_safety_attack.sort(key=getObjKeyByRoundNumber)\r\n proof_of_attempt_of_non_safety_attack.sort(key=getObjKeyByRoundNumber)\r\n \r\n # https://www.techiedelight.com/sort-list-of-objects-by-multiple-attributes-python/#:~:text=A%20Pythonic%20solution%20to%20in-place%20sort%20a%20list,key%20and%20reverse%20and%20produces%20a%20stable%20sort.\r\n voter_list.sort(key=lambda x: (x.name, x.round_number))\r\n handler_list.sort(key=lambda x: (x.name, x.round_number))\r\n\r\n # Remove duplicate \r\n if len(proof_of_attempt_of_safety_attack) != 0:\r\n remove_duplicate_safety_attack()\r\n if len(proof_of_attempt_of_non_safety_attack) != 0:\r\n remove_duplicate_liveness_attack()\r\n #for proposer in proposer_list:\r\n #print(proposer.name, proposer.round_number, proposer.qc_round_number)\r\n #print(committedRoundToQc)\r\n \r\n\r\ndef attackReporter(attack_id, reporter, round, qc_round):\r\n if attack_id == 1:\r\n if attack_1_reporter_to_roundqc.get(reporter, \"default\") == \"default\":\r\n #print(\"iii\",reporter)\r\n attack_1_reporter_to_roundqc[reporter] = [[round, qc_round]]\r\n else:\r\n #print(\"reporter\",reporter)\r\n attack_1_reporter_to_roundqc[reporter].append([round, qc_round])\r\n elif attack_id == 2:\r\n if attack_2_reporter_to_roundqc.get(reporter, \"default\") == \"default\":\r\n attack_2_reporter_to_roundqc[reporter] = [[round, qc_round]]\r\n else:\r\n attack_2_reporter_to_roundqc[reporter].append([round, qc_round])\r\n else:\r\n if attack_liveness_reporter_to_roundqc.get(reporter, \"default\") == \"default\":\r\n attack_liveness_reporter_to_roundqc[reporter] = [[round, qc_round]]\r\n else:\r\n attack_liveness_reporter_to_roundqc[reporter].append([round, qc_round])\r\n\r\n# Util for adding safety attack proof for one malicious node\r\ndef detectSafetyAttack(name, round_number, qc_round_number, attack_id, description):\r\n proof_of_attempt_of_safety_attack.append(MaliciousNode(name=name,\\\r\n round_number=round_number, qc_round_number=qc_round_number, \\\r\n attack_id=attack_id, description=description))\r\n\r\n# Util for removing dup\r\ndef remove_duplicate_safety_attack():\r\n #print(\"DEDUP\")\r\n if len(proof_of_attempt_of_safety_attack) == 0:\r\n return\r\n deduplicated_proof_of_attempt_of_safety_attack = [proof_of_attempt_of_safety_attack[0]]\r\n for i in range(1, len(proof_of_attempt_of_safety_attack)):\r\n if proof_of_attempt_of_safety_attack[i] != proof_of_attempt_of_safety_attack[i-1]:\r\n deduplicated_proof_of_attempt_of_safety_attack.append(proof_of_attempt_of_safety_attack[i])\r\n proof_of_attempt_of_safety_attack.clear()\r\n for i in deduplicated_proof_of_attempt_of_safety_attack:\r\n safetyattackRound.add(i.round_number)\r\n proof_of_attempt_of_safety_attack.append(i)\r\n\r\n# Util for removing dup\r\ndef remove_duplicate_liveness_attack():\r\n #print(\"DEDUP\")\r\n if len(proof_of_attempt_of_non_safety_attack) == 0:\r\n return\r\n deduplicated_proof_of_attempt_of_non_safety_attack = [proof_of_attempt_of_non_safety_attack[0]]\r\n for i in range(1, len(proof_of_attempt_of_non_safety_attack)):\r\n if proof_of_attempt_of_non_safety_attack[i] != proof_of_attempt_of_non_safety_attack[i-1]:\r\n deduplicated_proof_of_attempt_of_non_safety_attack.append(proof_of_attempt_of_non_safety_attack[i])\r\n 
proof_of_attempt_of_non_safety_attack.clear()\r\n for i in deduplicated_proof_of_attempt_of_non_safety_attack:\r\n proof_of_attempt_of_non_safety_attack.append(i)\r\n\r\n# Detect Attack 1: One leader proposes proposals with different QCs but same round numbers within 1 round\r\n# Also detect Attack 2.1: \r\n# One leader proposes proposals with non-consecutive round number R\r\n# Suppose current round number CR,\r\n# Current leader is L\r\n# Leader in R was PL\r\n# If R < CR, L ! PL\r\n# Then Attack 2.1\r\n# Also detect Attack 2.2: \r\n# One leader proposes proposals with non-consecutive round number R\r\n# Suppose current round number CR,\r\n# Current leader is L\r\n# Leader in R was PL\r\n# L's proposal's QC in CR is QC\r\n# RL's proposal was PQC\r\n# If R < CR, L = PL, and QC != PQC\r\n# Then Attack 2.2\r\n \r\n\r\n# Just put liveness or accidental stuff inside \r\ndef detectLivenessOrAccident(name, round_number, qc_round_number, description):\r\n proof_of_attempt_of_non_safety_attack.append(MaliciousButDueToLivenessOrAccidentNode(name, round_number, qc_round_number, description ))\r\n\r\n# Detect attack 2.2\r\ndef detectAttackTwoPtTwo():\r\n for i in range(1,len(proposer_list)):\r\n # FIX BUG 3: proposer_list[i].qc_round_number != proposer_list[i-1].qc_round_number WAS \r\n # NOT INCLUDED AT FIRST SO SOME LIVENESS ATTACK ARE MISINTERPRETED AS \r\n if proposer_list[i].name == proposer_list[i-1].name and \\\r\n proposer_list[i].round_number == proposer_list[i-1].round_number and \\\r\n proposer_list[i].qc_round_number != proposer_list[i-1].qc_round_number and \\\r\n proposer_list[i].round_number not in safetyattackRound:\r\n detectSafetyAttack(proposer_list[i].name, proposer_list[i].round_number, \\\r\n proposer_list[i].qc_round_number, 2, SAFETY_ATTACK_2_1)\r\n\r\ndef initDetector():\r\n detectAttackTwoPtTwo()\r\n\r\n# Detect attack 3\r\ndef detectAttackThree():\r\n if len(proof_of_attempt_of_safety_attack) == 0:\r\n return\r\n attack_one_list = []\r\n # Gather all leaders doing attack 1\r\n for attack in proof_of_attempt_of_safety_attack:\r\n if attack.attack_id == 1:\r\n attack_one_list.append(attack)\r\n #print(attack.name,attack.round_number, attack.qc_round_number, attack.description )\r\n \r\n for attack in attack_one_list:\r\n for voter_index in range(0, len(voter_list)):\r\n #print(voter_list[voter_index].name, voter_list[voter_index].round_number, voter_list[voter_index].qc_round_number, voter_list[voter_index].leader)\r\n \r\n # Find 3.1 by comparing if voter shows voting message that matches the malicious leader's behavior in Attack 1\r\n if voter_list[voter_index].round_number == attack.round_number and \\\r\n voter_list[voter_index].qc_round_number == attack.qc_round_number and\\\r\n voter_list[voter_index].leader == attack.name and\\\r\n proof_of_attempt_of_safety_attack[-1] != MaliciousNode(name=voter_list[voter_index].name, round_number=voter_list[voter_index].round_number,\\\r\n qc_round_number=voter_list[voter_index].qc_round_number, attack_id=3, description=SAFETY_ATTACK_3_CORRECT_VOTE_REPORT):\r\n detectSafetyAttack(name=voter_list[voter_index].name, round_number=voter_list[voter_index].round_number,\\\r\n qc_round_number=voter_list[voter_index].qc_round_number, attack_id=3, description=SAFETY_ATTACK_3_CORRECT_VOTE_REPORT)\r\n #print(voter_list[voter_index].name, voter_list[voter_index].round_number, voter_list[voter_index].qc_round_number)\r\n\r\n # Find 3.2 by seeing whether voter did report the malicious leader for Attack 1 by comparing with 
attack_1_reporter_to_roundqc\r\n            try:\r\n                #print(attack_1_reporter_to_roundqc)\r\n                if voter_list[voter_index].round_number == attack.round_number and \\\r\n                    voter_list[voter_index].qc_round_number != attack.qc_round_number and \\\r\n                    voter_list[voter_index+1].round_number != attack.round_number:\r\n                    innocent = False\r\n                    for attacker in proof_of_attempt_of_safety_attack:\r\n                        if attacker.description == SAFETY_ATTACK_3_CORRECT_VOTE_REPORT and voter_list[voter_index].name == attacker.name:\r\n                            innocent = True\r\n                            break\r\n                    if attack_1_reporter_to_roundqc.get(voter_list[voter_index].name, \"default\") != \"default\":\r\n                        for i in attack_1_reporter_to_roundqc[voter_list[voter_index].name]:\r\n                            if i[0] == attack.round_number and i[1] == attack.qc_round_number:\r\n                                innocent = True\r\n                                break\r\n\r\n                    if innocent == False:\r\n                        detectSafetyAttack(name=voter_list[voter_index].name, round_number=attack.round_number, \\\r\n                            qc_round_number=attack.qc_round_number, attack_id=3, description=SAFETY_ATTACK_3_NO_OR_WRONG_VOTE_REPORT)\r\n\r\n            except:  # last voter in voter_list\r\n                innocent = False  # initialise here: the exception may fire before the try block sets it\r\n                if attack_1_reporter_to_roundqc.get(voter_list[voter_index].name, \"default\") != \"default\":\r\n                    for i in attack_1_reporter_to_roundqc[voter_list[voter_index].name]:\r\n                        if i[0] == attack.round_number and i[1] == attack.qc_round_number:\r\n                            innocent = True\r\n                            break\r\n\r\n                if innocent == False:\r\n                    detectSafetyAttack(name=voter_list[voter_index].name, round_number=voter_list[voter_index].round_number, \\\r\n                        qc_round_number=voter_list[voter_index].qc_round_number, attack_id=3, description=SAFETY_ATTACK_3_NO_OR_WRONG_VOTE_REPORT)\r\n\r\n# Detect attack 4\r\ndef detectAttackFour():\r\n    attack_two_pt_one_list = []\r\n    # Gather all leaders doing attack 2.1\r\n    for attack in proof_of_attempt_of_safety_attack:\r\n        if attack.description == SAFETY_ATTACK_2_2:\r\n            attack_two_pt_one_list.append(attack)\r\n            #print(attack.name, attack.round_number, attack.qc_round_number, attack.description)\r\n    for attack in attack_two_pt_one_list:\r\n        for handler_index in range(0, len(handler_list)):\r\n            #print(\"attack 4\", handler.name, handler.round_number, handler.qc_round_number)\r\n            #print(\"mali\", attack.name, attack.round_number, attack.qc_round_number)\r\n            # need to check the leader too, to avoid false positives like the 1 0, 1 0 examples\r\n\r\n            # detect attack 4.1.1\r\n            if handler_list[handler_index].round_number == attack.round_number and \\\r\n                handler_list[handler_index].qc_round_number == attack.qc_round_number and \\\r\n                handler_list[handler_index].leader == attack.name and \\\r\n                proof_of_attempt_of_safety_attack[-1] != MaliciousNode(name=handler_list[handler_index].name, round_number=handler_list[handler_index].round_number, \\\r\n                    qc_round_number=handler_list[handler_index].qc_round_number, attack_id=4, description=SAFETY_ATTACK_4_2_CORRECT_VOTE_REPORT):\r\n                detectSafetyAttack(name=handler_list[handler_index].name, round_number=handler_list[handler_index].round_number, \\\r\n                    qc_round_number=handler_list[handler_index].qc_round_number, attack_id=4, description=SAFETY_ATTACK_4_2_CORRECT_VOTE_REPORT)\r\n\r\n            # detect attack 4.1.2\r\n            try:\r\n                #print(attack_2_reporter_to_roundqc)\r\n                #print(\"handler\", handler_list[handler_index].name, handler_list[handler_index].round_number, handler_list[handler_index].qc_round_number)\r\n                # if this node does not report the bad leader in attack 4.1\r\n                if handler_list[handler_index].name != handler_list[handler_index+1].name \\\r\n                    and handler_list[handler_index].name not in list(attack_2_reporter_to_roundqc.keys()):\r\n                    innocent =
False\r\n                    #print(\"handler\", handler_list[handler_index].name, handler_list[handler_index].round_number, handler_list[handler_index].qc_round_number)\r\n                    # check if already considered doing attack 4.1.1 and skip\r\n                    for attacker in proof_of_attempt_of_safety_attack:\r\n                        if attacker.description == SAFETY_ATTACK_4_2_CORRECT_VOTE_REPORT and handler_list[handler_index].name == attacker.name:\r\n                            innocent = True\r\n                            break\r\n\r\n                    # check if it didn't report or reported wrongly\r\n                    if attack_2_reporter_to_roundqc.get(handler_list[handler_index].name, \"default\") != \"default\":\r\n                        for i in attack_2_reporter_to_roundqc[handler_list[handler_index].name]:\r\n                            if i[0] == attack.round_number and i[1] == attack.qc_round_number:\r\n                                innocent = True\r\n                                break\r\n\r\n                    if innocent == False:\r\n                        detectSafetyAttack(name=handler_list[handler_index].name, round_number=attack.round_number, \\\r\n                            qc_round_number=attack.qc_round_number, attack_id=4, description=SAFETY_ATTACK_4_2_NO_OR_WRONG_VOTE_REPORT)\r\n\r\n            except:  # if last name in handler_list\r\n                if handler_list[handler_index].name not in list(attack_2_reporter_to_roundqc.keys()):\r\n                    innocent = False\r\n                    #print(\"handler\", handler_list[handler_index].name, attack.round_number, attack.qc_round_number)\r\n                    # check if already considered doing attack 4.1.1 and skip\r\n                    for attacker in proof_of_attempt_of_safety_attack:\r\n                        if attacker.description == SAFETY_ATTACK_4_2_CORRECT_VOTE_REPORT and handler_list[handler_index].name == attacker.name:\r\n                            innocent = True\r\n                            break\r\n\r\n                    # check if it didn't report or reported wrongly\r\n                    if attack_2_reporter_to_roundqc.get(handler_list[handler_index].name, \"default\") != \"default\":\r\n                        for i in attack_2_reporter_to_roundqc[handler_list[handler_index].name]:\r\n                            if i[0] == attack.round_number and i[1] == attack.qc_round_number:\r\n                                innocent = True\r\n                                break\r\n\r\n                    if innocent == False:\r\n                        detectSafetyAttack(name=handler_list[handler_index].name, round_number=attack.round_number, \\\r\n                            qc_round_number=attack.qc_round_number, attack_id=4, description=SAFETY_ATTACK_4_2_NO_OR_WRONG_VOTE_REPORT)\r\n    attack_two_pt_two_list = []\r\n\r\n    # Gather all leaders doing attack 2.2\r\n    for attack in proof_of_attempt_of_safety_attack:\r\n        if attack.description == SAFETY_ATTACK_2_2:\r\n            attack_two_pt_two_list.append(attack)\r\n            #print(\">>>\", attack.name, attack.round_number, attack.qc_round_number, attack.description)\r\n    for attack in attack_two_pt_two_list:\r\n        for processer_index in range(0, len(processer_list)):\r\n            # need to check the leader too, to avoid false positives like the 1 0, 1 0 examples\r\n            # detect 4.2.1\r\n\r\n            # FIX BUG 4: forgot to include processer_list[processer_index].name not in attack_2_reporter_to_roundqc.keys()\r\n            # to release innocent reporters\r\n            if processer_list[processer_index].round_number == attack.round_number and \\\r\n                processer_list[processer_index].qc_round_number == attack.qc_round_number and \\\r\n                processer_list[processer_index].leader == attack.name and \\\r\n                proof_of_attempt_of_safety_attack[-1] != MaliciousNode(name=processer_list[processer_index].name, round_number=processer_list[processer_index].round_number, \\\r\n                    qc_round_number=processer_list[processer_index].qc_round_number, attack_id=4, description=SAFETY_ATTACK_4_1_CORRECT_VOTE_REPORT) and \\\r\n                processer_list[processer_index].name not in attack_2_reporter_to_roundqc.keys():\r\n                detectSafetyAttack(name=processer_list[processer_index].name, round_number=processer_list[processer_index].round_number, \\\r\n
qc_round_number=processer_list[processer_index].qc_round_number, attack_id=4, description=SAFETY_ATTACK_4_1_CORRECT_VOTE_REPORT)\r\n\r\n            # detect 4.2.2\r\n            try:\r\n                if processer_list[processer_index].name != processer_list[processer_index+1].name \\\r\n                    and processer_list[processer_index].name not in list(attack_2_reporter_to_roundqc.keys()):\r\n                    innocent = False\r\n                    # check if already considered doing attack 4.2.1 for this proposal and skip\r\n                    for attacker in proof_of_attempt_of_safety_attack:\r\n                        if attacker.description == SAFETY_ATTACK_4_1_CORRECT_VOTE_REPORT and processer_list[processer_index].name == attacker.name:\r\n                            innocent = True\r\n                            break\r\n\r\n                    # check if it didn't report or reported wrongly\r\n                    if attack_2_reporter_to_roundqc.get(processer_list[processer_index].name, \"default\") != \"default\":\r\n                        for i in attack_2_reporter_to_roundqc[processer_list[processer_index].name]:\r\n                            if i[0] == attack.round_number and i[1] == attack.qc_round_number:\r\n                                innocent = True\r\n                                break\r\n\r\n                    if innocent == False:\r\n                        detectSafetyAttack(name=processer_list[processer_index].name, round_number=attack.round_number, \\\r\n                            qc_round_number=attack.qc_round_number, attack_id=4, description=SAFETY_ATTACK_4_1_NO_OR_WRONG_VOTE_REPORT)\r\n            except:  # last node\r\n                if processer_list[processer_index].name not in list(attack_2_reporter_to_roundqc.keys()):\r\n                    innocent = False\r\n                    # check if already considered doing attack 4.2.1 for this proposal and skip\r\n                    for attacker in proof_of_attempt_of_safety_attack:\r\n                        if attacker.description == SAFETY_ATTACK_4_1_CORRECT_VOTE_REPORT and processer_list[processer_index].name == attacker.name:\r\n                            innocent = True\r\n                            break\r\n\r\n                    # check if it didn't report or reported wrongly\r\n                    if attack_2_reporter_to_roundqc.get(processer_list[processer_index].name, \"default\") != \"default\":\r\n                        for i in attack_2_reporter_to_roundqc[processer_list[processer_index].name]:\r\n                            if i[0] == attack.round_number and i[1] == attack.qc_round_number:\r\n                                innocent = True\r\n                                break\r\n\r\n                    if innocent == False:\r\n                        detectSafetyAttack(name=processer_list[processer_index].name, round_number=attack.round_number, \\\r\n                            qc_round_number=attack.qc_round_number, attack_id=4, description=SAFETY_ATTACK_4_1_NO_OR_WRONG_VOTE_REPORT)\r\n\r\n# FIX BUG 1: In 10_30 the precision will not be 100% because of Attack 1: sometimes an honest node does not receive a block to vote on, like node 8 in round 14 of 10_30. When the attacker is doing \r\n#attack 1, the attacker sends 2 blocks.
But node 8 didn't receive the first honest block and didn't update self.last_voted_round,\r\n#so when it sees the second malicious block it unexpectedly passes safety rule 1, which should not have passed, causing it to\r\n#not report the malicious leader, hence a false positive here from wrongly catching node 8\r\n\r\n# The fix depends on storing all attempts of liveness attacks and any accidental message-loss behaviour,\r\n# then comparing against that list: any node wrongly caught as committing safety\r\n# attack 3 by not reporting a malicious leader is considered innocent of the safety attack if it\r\n# did report and the report was classified as liveness\r\ndef detectLivenessWhichIsActuallySafetyAttack():\r\n    innocent_voter = []\r\n    for name, roundqc in attack_liveness_reporter_to_roundqc.items():\r\n        for attacker in proof_of_attempt_of_safety_attack:\r\n            for little_roundqc in roundqc:\r\n                if name == attacker.name and little_roundqc[0] == attacker.round_number and \\\r\n                    little_roundqc[1] == attacker.qc_round_number:\r\n                    innocent_voter.append(attacker)\r\n    temp_proof_of_attempt_of_safety_attack = []\r\n    for attack in proof_of_attempt_of_safety_attack:\r\n        temp_proof_of_attempt_of_safety_attack.append(attack)\r\n    proof_of_attempt_of_safety_attack.clear()\r\n    for attack in temp_proof_of_attempt_of_safety_attack:\r\n        innocent = False\r\n        for voter in innocent_voter:\r\n            if voter.name == attack.name and voter.round_number == attack.round_number and \\\r\n                voter.qc_round_number == attack.qc_round_number:\r\n                innocent = True\r\n                break\r\n        if innocent == False:\r\n            proof_of_attempt_of_safety_attack.append(attack)\r\n\r\ndef main():\r\n    start_time = time.time()\r\n    parseLog()\r\n    initDetector()\r\n    '''for i in voter_list:\r\n        print(\"voter\", i.name, i.round_number, i.qc_round_number)\r\n    for i in handler_list:\r\n        print(\"handler\", i.name, i.round_number, i.qc_round_number)'''\r\n    detectAttackThree()\r\n    detectAttackFour()\r\n\r\n    proof_of_attempt_of_safety_attack.sort(key=lambda x: (x.name, x.round_number, x.qc_round_number, x.description))\r\n    remove_duplicate_safety_attack()\r\n    detectLivenessWhichIsActuallySafetyAttack()\r\n    print(\"\\n\\nFinal Safety Attack\")\r\n    for i in proof_of_attempt_of_safety_attack:\r\n        print(i.name, i.round_number, i.qc_round_number, i.description)\r\n\r\n    elapsed_time = time.time() - start_time\r\n    print(\"Time to detect malicious nodes: \", elapsed_time, \"seconds\")\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\ndef run(file, fn):\r\n    global proposer_list\r\n    global proof_of_attempt_of_safety_attack\r\n    global proof_of_attempt_of_non_safety_attack\r\n    global voter_list\r\n    global handler_list\r\n    global processer_list\r\n    global livenessRoundToQc\r\n    global safetyOneRoundToQc\r\n    global safetyattackRound\r\n    global attack_1_reporter_to_roundqc\r\n    global attack_2_reporter_to_roundqc\r\n    proposer_list = []\r\n    proof_of_attempt_of_safety_attack = []\r\n    proof_of_attempt_of_non_safety_attack = []\r\n    voter_list = []\r\n    handler_list = []\r\n    processer_list = []\r\n    livenessRoundToQc = {}\r\n    safetyOneRoundToQc = {}\r\n    safetyattackRound = set()\r\n    attack_1_reporter_to_roundqc = {}\r\n    attack_2_reporter_to_roundqc = {}\r\n    temp = sys.stdout\r\n    global FILE_PATH\r\n    FILE_PATH = file\r\n    print(FILE_PATH)\r\n    f = open('tests/test' + str(fn) + '.txt', 'w')\r\n    sys.stdout = f\r\n    start_time = time.time()\r\n    parseLog()\r\n    initDetector()\r\n    detectAttackThree()\r\n    detectAttackFour()\r\n    proof_of_attempt_of_safety_attack.sort(key=lambda x: (x.name,
x.round_number, x.qc_round_number, x.description))\r\n remove_duplicate_safety_attack()\r\n detectLivenessWhichIsActuallySafetyAttack()\r\n print(\"Final Safety Attack\")\r\n for i in proof_of_attempt_of_safety_attack:\r\n print(i.attack_id, i.name, i.round_number, i.qc_round_number, i.description)\r\n \r\n elapsed_time = time.time() - start_time\r\n print(\"Time to detect malicious nodes: \", elapsed_time, \"seconds\")\r\n sys.stdout.flush()\r\n sys.stdout = temp\r\n f.close()","repo_name":"Exilehead/comp0064-dissertation","sub_path":"bft_detector.py","file_name":"bft_detector.py","file_ext":"py","file_size_in_byte":40687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21512564326","text":"# -*-coding:Utf-8 -*\n\n\"\"\"Ce fichier contient le code principal du jeu.\n\nExécutez-le avec Python pour lancer le jeu.\n\n\"\"\"\n\nimport os\nimport os, shutil\n\nfrom carte import Carte\nfrom labyrinthe import Labyrinthe\n\n\ndef chargeLabs():\n\t\"\"\" Cette fonction a pour but de charger les jeux en cours s'ils existent, ou \n\td'appeler la fonction qui chargera les cartes d'origine\n\t\"\"\"\n\tif os.listdir('labs') and os.listdir('labs') != []:\n\n\t\tprint('\\r \\n Il y a des parties enregistrées, voulez-vous continuer à jouer ?')\n\t\tlabs = []\n\t\tfor nom_fichier in os.listdir(\"labs\"):\n\t\t if nom_fichier.endswith(\".txt\"):\n\t\t chemin = os.path.join(\"labs\", nom_fichier)\n\t\t nom_lab = nom_fichier[:-3].lower()\n\t\t with open(chemin, \"r\") as fichier:\n\t\t contenu = fichier.read()\n\t\t carte = Carte(nom_fichier, contenu)\n\t\t labs.append(carte)\n\n\t\tprint(\"\\rParties en cours :\")\n\t\tfor i, lab in enumerate(labs):\n\t\t\tprint(\" {}\".format( lab.nom))\n\n\t\tchoix = raw_input('O/N : ')\n\n\n\t\tif choix == 'O':\n\t\t\tfor i, lab in enumerate(labs):\n\t\t\t\tprint(\" {} - {}\".format(i + 1, lab.nom))\n\t\t\tnumero_lab = input('tapez le chiffre du labyrinthe choisi :') - 1\n\t\t\tlab = Labyrinthe(labs[numero_lab])\n\n\t\telif choix == 'N':\n\t\t\tfor nom_fichier in os.listdir(\"labs\"):\n\t\t\t\tchemin = os.path.join(\"labs\", nom_fichier)\n\t\t\t\tos.unlink(chemin)\n\n\t\t\tlab = chargeCartes()\n\n\telse:\n\t\tlab = chargeCartes()\n\treturn lab\n\n\ndef chargeCartes():\n\t# Cette fonction charge les cartes d'origine.\n\tcartes = []\n\tfor nom_fichier in os.listdir(\"cartes\"):\n\t if nom_fichier.endswith(\".txt\"):\n\t chemin = os.path.join(\"cartes\", nom_fichier)\n\t nom_carte = nom_fichier[:-3].lower()\n\t with open(chemin, \"r\") as fichier:\n\t contenu = fichier.read()\n\t carte = Carte(nom_fichier, contenu)\n\t cartes.append(carte)\n\n\t# On affiche les cartes existantes\n\tprint(\"\\rLabyrinthes existants :\")\n\tfor i, carte in enumerate(cartes):\n\t print(\" {} - {}\".format(i + 1, carte.nom))\n\n\tnumero_lab = input('tapez le chiffre du labyrinthe choisi :') - 1\n\tlab = Labyrinthe(cartes[numero_lab])\n\treturn lab\n\n\n\nlab = chargeLabs()\n\nprint('Choisissez les options suivantes : \\n - N pour monter \\n - S pour descendre \\n - O pour aller à gauche \\n - E pour aller à droite \\n - Q pour quitter')\n\nchoix = ''\n\n\nwhile (choix.upper() != 'Q'): # Tant que le joueur n'a pas décidé de quitter la partie...\n\tprint(lab) # ... On montre l'état du labyrinthe ...\n\tchoix = raw_input('>') # ... On lui demande d'entrer une commande ...\n\tanalyse = lab.analyse(choix) # ... 
L'objet lab l'analyse ...\n\tif analyse is True: # On sort le résultat.\n\t\tchoix = 'Q'\n\t\tlab.finirJeu()\n\t\tprint('Vous avez gagné !')\n\telif analyse == 'On continue alors.':\n\t\tchoix = 'C'\n\t\tprint(analyse)\n\telse:\n\t\tprint(analyse)\n\n\n","repo_name":"gastrid/roboc","sub_path":"roboc.py","file_name":"roboc.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28224348275","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 9 12:58:09 2020\n\n@author: Anirudh Raghavan\n\"\"\"\n\nimport pandas as pd\nimport json\n\n\ndef search_exact(search, names_list, tmp_list):\n break_v = \"No\"\n for name in names_list:\n for item in tmp_list:\n if name in item:\n a = tmp_list.index(item)\n break_v = \"Yes\"\n break\n \n elif item == tmp_list[-1]:\n a = \"No \" + search\n \n if break_v == \"Yes\":\n break\n \n return a\n\ndef search_contain(search,names_list, tmp_list):\n for item in tmp_list:\n if all(x in item for x in names_list):\n result = tmp_list.index(item)\n break\n else:\n result = \"No \" + search\n return result\n \n\n\ndef inc_doc_sum (csv_name):\n \n with open(\"format_dict.json\") as json_file:\n format_dict = json.load(json_file)\n\n tmp = pd.read_csv(csv_name)\n tmp_values = list(tmp[tmp.columns[1]])\n tmp_list = list(tmp[tmp.columns[0]])\n tmp_list = [str(x) for x in tmp_list]\n tmp_list = [x.lower() for x in tmp_list]\n \n doc_dict = {}\n \n doc_dict['Stock'] = csv_name.split(\"_\")[0]\n doc_dict['Form'] = csv_name.split(\"_\")[1]\n doc_dict['Date'] = csv_name.split(\"_\")[-1].split(\".\")[0]\n \n for key in format_dict.keys():\n if key == \"Income before taxes\" or key == \"Dividends\" or key == \"Net Income\":\n result = search_contain(key, format_dict[key], tmp_list)\n else:\n result = search_exact(key, format_dict[key], tmp_list)\n \n if type(result) == int:\n doc_dict[key] = tmp_values[result]\n else:\n doc_dict[key] = result\n \n db = pd.read_csv(\"inc_format.csv\")\n db = db.append(doc_dict, ignore_index = True)\n db.to_csv(\"inc_format.csv\", index = False)\n\n \n\n\n#file1 = open(\"csv_list_income.txt\",\"r\")\n#result = file1.read()\n#result = result.split(\", \")\n\n","repo_name":"Ani-07/SEC-Web-Scraping","sub_path":"doc_combiner.py","file_name":"doc_combiner.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12746166285","text":"n = 12\r\n\r\nclass Solution(object):\r\n def numSquares(self, n):\r\n square_nums = [i**2 for i in range(1, int((n**0.5))+1)]\r\n print (square_nums)\r\n\r\n def minNumSquares(k):\r\n \"\"\" recursive solution \"\"\"\r\n # bottom cases: find a square number\r\n if k in square_nums:\r\n return 1\r\n min_num = float('inf')\r\n\r\n # Find the minimal value among all possible solutions\r\n for square in square_nums:\r\n if k < square:\r\n break\r\n new_num = minNumSquares(k-square) + 1\r\n min_num = min(min_num, new_num)\r\n print (min_num)\r\n return min_num\r\n\r\n return minNumSquares(n)\r\n\r\nz = Solution()\r\nprint (z.numSquares(n))","repo_name":"derahul9/Python","sub_path":"Programming Interview/Programs/Medium/Perfect Squares.py","file_name":"Perfect Squares.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2116139011","text":"import datetime\nimport pymssql\n\nconn = pymssql.connect(\n host='yourhost', # 位置\n 
user='youracct', # DB帳號\n password='yourpassword', # DB密碼\n database='yourDBname' # DB name\n)\n\ncursor = conn.cursor()\nstrCREATE_TABLE = \"IF NOT EXISTS(select * from sysobjects where name=\\'MSSQLArchive\\')\\\nCREATE TABLE MSSQLArchive( \\\nSEQ INT IDENTITY PRIMARY KEY,\\\nNAME VARCHAR(100) NOT NULL,\\\nRNAME VARCHAR(100) NOT NULL,\\\nNBR_TYPE VARCHAR(100) NOT NULL, \\\nPATH VARCHAR(100) NOT NULL, \\\nSEASON VARCHAR(10), \\\nEPISODE VARCHAR(10) NOT NULL,\\\nEPISODE_LIST VARCHAR(300),\\\nEXT VARCHAR(50) NOT NULL,\\\nCC VARCHAR(1) NOT NULL,\\\nTAG VARCHAR(500),\\\nMNT_DT datetime)\"\n\ncursor.execute(strCREATE_TABLE)\ntry:\n conn.commit()\n print('CREATE TABLE SUCCESS')\nexcept:\n print('CREATE TABLE ERROR')\n\n\ndict = {\n 'NAME': 'test name',\n 'RNAME': 'test name',\n 'NBR_TYPE': '1',\n 'PATH': 'D://one//',\n 'SEASON': '1',\n 'EPISODE': '9',\n 'EPISODE_LIST': '1,2,3,4,5,5.5,6,7,8',\n 'EXT': '.mp4',\n 'CC': '1',\n 'TAG': 'test,test name',\n 'MNT_DT': datetime.date.today()}\n\n\ntry:\n strttt = \"INSERT INTO MSSQLArchive ( NAME, RNAME, NBR_TYPE,PATH,SEASON,EPISODE,EPISODE_LIST,EXT,CC,TAG,MNT_DT) VALUES ('\" + \\\n dict['NAME']+\"','\" + dict['RNAME']+\"','\"+dict['NBR_TYPE'] + \"','\" + dict['PATH']+\"','\" + dict['SEASON']+\"','\"+dict['EPISODE'] + \\\n \"','\" + dict['EPISODE_LIST'] + \"','\"+dict['EXT']+\"','\" + \\\n dict['CC']+\"','\" + dict['TAG']+\"','\"+str(dict['MNT_DT'])+\"')\"\n cursor.execute(strttt)\n conn.commit()\n print('insert data to MSSQL success ...')\nexcept:\n print('寫入SQL資料錯誤')\n\n\ncursor.close()\nconn.close()\n","repo_name":"imneverdied/Python-note","sub_path":"Python MSSQL/Python MSSQL.py","file_name":"Python MSSQL.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23563791551","text":"import webbrowser\nfrom threading import Thread\nfrom tkinter import ttk\n\nfrom customtkinter import CTkButton, CTkFrame, CTkLabel\n\nfrom ice_launcher.components.heading import Heading\nfrom ice_launcher.components.scrollable_frame import ScrollableFrame\nfrom ice_launcher.lib import minecraft_news\n\n\nclass News(CTkFrame):\n articles: minecraft_news.Articles\n\n def __init__(self, master) -> None:\n super().__init__(master=master)\n\n self.grid_columnconfigure(0, weight=1)\n\n heading = Heading(master=self, text=\"🌎 News\")\n heading.grid(row=0, column=0, pady=20, padx=20, sticky=\"nwe\")\n\n self.news_frame = ScrollableFrame(master=self)\n self.news_frame.grid(row=1, column=0, pady=20, padx=20, sticky=\"nswe\")\n self.grid_rowconfigure(1, weight=1)\n\n Thread(target=self.update_news).start()\n\n def update_news(self) -> None:\n news = minecraft_news.fetch()\n\n for index, article in enumerate(news.article_grid):\n label = CTkLabel(\n master=self.news_frame.content,\n text=article.default_tile.title,\n anchor=\"w\",\n )\n label.grid(row=index * 2, column=0, pady=10, padx=0, sticky=\"nw\")\n open_button = CTkButton(\n master=self.news_frame.content,\n text=\"Open ↗️\",\n width=0,\n command=lambda: self.open_article_url(article.article_url),\n )\n open_button.grid(row=index * 2, column=1, pady=10, padx=(0, 10), sticky=\"e\")\n separator = ttk.Separator(self.news_frame.content, orient=\"horizontal\")\n separator.grid(\n row=index * 2 + 1,\n column=0,\n columnspan=2,\n pady=0,\n padx=(0, 10),\n sticky=\"ew\",\n )\n\n def open_article_url(self, url: str) -> None:\n url = f\"https://www.minecraft.net{url}\"\n 
webbrowser.open(url)\n","repo_name":"mq1/ice-launcher-old","sub_path":"ice_launcher/views/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"3721703259","text":"import time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nfrom thre3d_atom.rendering.volumetric.utils.misc import cast_rays, flatten_rays\nfrom thre3d_atom.thre3d_reprs.renderers import (\n render_sh_voxel_grid,\n SHVoxGridRenderConfig,\n)\nfrom thre3d_atom.thre3d_reprs.voxels import (\n VoxelGrid,\n VoxelSize,\n)\nfrom thre3d_atom.utils.constants import EXTRA_ACCUMULATED_WEIGHTS\nfrom thre3d_atom.utils.imaging_utils import (\n pose_spherical,\n CameraIntrinsics,\n CameraBounds,\n postprocess_depth_map,\n)\n\n\ndef _plot_all_cube_sides(\n voxel_grid: VoxelGrid,\n camera_intrinsics: CameraIntrinsics, # shouldn't be too high\n num_samples_per_ray: int,\n camera_bounds: CameraBounds,\n radius: float,\n device: torch.device,\n) -> float:\n height, width, _ = camera_intrinsics\n\n # render all 6 sides of the cube:\n render_times = []\n for side, (yaw, pitch) in enumerate(\n ((0, 0), (90, 0), (180, 0), (270, 0), (0, -90), (0, 90)), 1\n ):\n camera_pose = pose_spherical(yaw=yaw, pitch=pitch, radius=radius)\n rays = cast_rays(camera_intrinsics, camera_pose, device=device)\n\n # render the voxel grid:\n\n start_time = time.perf_counter()\n with torch.no_grad():\n rendered_output = render_sh_voxel_grid(\n voxel_grid=voxel_grid,\n rays=flatten_rays(rays),\n render_config=SHVoxGridRenderConfig(\n num_samples_per_ray=num_samples_per_ray,\n camera_bounds=camera_bounds,\n white_bkgd=True,\n ),\n )\n end_time = time.perf_counter()\n render_time = (end_time - start_time) * 1000 # ms\n render_times.append(render_time)\n\n # process the rendered output:\n # noinspection DuplicatedCode\n colour_render = (\n rendered_output.colour.reshape(height, width, 3).detach().cpu().numpy()\n )\n depth_render = (\n rendered_output.depth.reshape(height, width, 1).detach().cpu().numpy()\n )\n depth_render = postprocess_depth_map(depth_render, camera_bounds)\n acc_render = rendered_output.extra[EXTRA_ACCUMULATED_WEIGHTS]\n acc_render = acc_render.reshape(height, width, 1).detach().cpu().numpy()\n\n # show the rendered output:\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n fig.suptitle(f\"side {side}\")\n ax1.set_title(\"colour render\")\n ax1.imshow(colour_render)\n ax2.set_title(\"depth render\")\n ax2.imshow(depth_render)\n ax3.set_title(\"acc render\")\n ax3.imshow(acc_render, cmap=\"gray\")\n\n plt.show()\n render_time = np.mean(render_times).item()\n return render_time\n\n\ndef test_trilinear_interpolation_single_cube(device: torch.device) -> None:\n # fmt: off\n voxel_grid = VoxelGrid(\n densities=torch.tensor(\n [\n np.random.uniform(-10.0, 10.0, 1).item(),\n np.random.uniform(-10.0, 10.0, 1).item(),\n np.random.uniform(-10.0, 10.0, 1).item(),\n np.random.uniform(-10.0, 10.0, 1).item(),\n np.random.uniform(-10.0, 10.0, 1).item(),\n np.random.uniform(-10.0, 10.0, 1).item(),\n np.random.uniform(-10.0, 10.0, 1).item(),\n np.random.uniform(-10.0, 10.0, 1).item(),\n ],\n device=device,\n dtype=torch.float32,\n ).reshape(2, 2, 2, 1),\n features=torch.tensor(\n [\n 10.0, -10.0, -10.0,\n -10.0, 10.0, -10.0,\n -10.0, -10.0, 10.0,\n 10.0, 10.0, -10.0,\n -10.0, 10.0, 10.0,\n 10.0, -10.0, 10.0,\n 10.0, 10.0, 10.0,\n -10.0, -10.0, -10.0,\n ],\n device=device,\n dtype=torch.float32,\n ).reshape(2, 
2, 2, 3),\n voxel_size=VoxelSize(2, 2, 2),\n density_preactivation=torch.nn.Identity(),\n density_postactivation=torch.nn.ReLU()\n )\n # fmt: on\n\n print(voxel_grid)\n\n _plot_all_cube_sides(\n voxel_grid,\n CameraIntrinsics(200, 200, 240),\n num_samples_per_ray=512,\n camera_bounds=CameraBounds(5.0, 18.0),\n radius=10.0,\n device=device,\n )\n\n\ndef test_render_speed(device: torch.device) -> None:\n # GIVEN: The following configuration:\n grid_size, num_samples_per_ray = 128, 256\n camera_intrinsics = CameraIntrinsics(400, 400, 512.0)\n num_samples_per_ray = 256\n camera_bounds = CameraBounds(0.5, 8.0)\n n_times = 100 # number of runs over which time is averaged\n\n # fmt: off\n densities = torch.empty((grid_size, grid_size, grid_size, 1), device=device)\n densities = torch.nn.init.uniform_(densities, -10.0, 10.0)\n features = torch.empty((grid_size, grid_size, grid_size, 3), device=device)\n features = torch.nn.init.uniform_(features, -10.0, 10.0)\n voxel_grid = VoxelGrid(\n densities=densities,\n features=features,\n voxel_size=VoxelSize(2.0 / 128, 2.0 / 128, 2.0 / 128),\n )\n # fmt: on\n\n print(voxel_grid)\n\n # render the voxel grid:\n print(f\"rendering images {n_times} times ...\")\n render_times = []\n for _ in tqdm(range(n_times)):\n # sample a random pose:\n yaw, pitch = np.random.uniform(0.0, 360.0), np.random.uniform(0.0, 180.0)\n radius = np.random.uniform(4.0, 5.0)\n camera_pose = pose_spherical(yaw=yaw, pitch=pitch, radius=radius)\n rays = cast_rays(camera_intrinsics, camera_pose, device=device)\n flat_rays = flatten_rays(rays)\n\n start_time = time.perf_counter()\n with torch.no_grad():\n rendered_output = render_sh_voxel_grid(\n voxel_grid=voxel_grid,\n rays=flat_rays,\n render_config=SHVoxGridRenderConfig(\n num_samples_per_ray=num_samples_per_ray,\n camera_bounds=camera_bounds,\n white_bkgd=True,\n ),\n parallel_points_chunk_size=None,\n )\n end_time = time.perf_counter()\n render_time = (end_time - start_time) * 1000 # ms\n render_times.append(render_time)\n\n # plot the final render for visual inspection :D\n height, width, _ = camera_intrinsics\n # noinspection DuplicatedCode\n colour_render = (\n rendered_output.colour.reshape(height, width, 3).detach().cpu().numpy()\n )\n depth_render = (\n rendered_output.depth.reshape(height, width, 1).detach().cpu().numpy()\n )\n depth_render = postprocess_depth_map(depth_render, camera_bounds)\n acc_render = rendered_output.extra[EXTRA_ACCUMULATED_WEIGHTS]\n acc_render = acc_render.reshape(height, width, 1).detach().cpu().numpy()\n # show the rendered output:\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n ax1.set_title(\"colour render\")\n ax1.imshow(colour_render)\n ax2.set_title(\"depth render\")\n ax2.imshow(depth_render)\n ax3.set_title(\"acc render\")\n ax3.imshow(acc_render, cmap=\"gray\")\n plt.show()\n\n avg_render_time = np.mean(render_times).item()\n print(f\"total time taken for rendering: {avg_render_time} ms\")\n","repo_name":"akanimax/thr3ed_atom","sub_path":"thre3d_atom/thre3d_reprs/tests/test_voxels.py","file_name":"test_voxels.py","file_ext":"py","file_size_in_byte":7218,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"81"} +{"seq_id":"27055718125","text":"import os\nimport discord\nfrom discord.ext import commands\nimport json\nimport time\nfrom time import gmtime, strftime\nimport asyncio\nimport random\nfrom collections import OrderedDict\nfrom MagicConchShell import start_time, dir_path\n\n\n# For $game command\ndef log_game(gamename, user):\n log_path = 
os.path.join(dir_path, 'LogGames.txt')\n with open(log_path, 'a') as log_file:\n log_file.write(gamename + \" \" + user + '\\n')\n\n\n# For $uptime command\ndef get_uptime():\n end_time = time.time()\n return(strftime(\"%H:%M:%S\", gmtime(int('{:.0f}'.format(float(str((end_time-start_time))))))))\n\n\nclass Utilities(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n \n\n #############################\n # ping command\n #############################\n @commands.command(help=\"$ping\", aliases=['p'])\n async def ping(self, ctx):\n await ctx.send(f'Bot latency: {round(self.client.latency * 1000)}ms')\n\n\n #############################\n # uptime command\n #############################\n @commands.command(help=\"$uptime\", aliases=['u'])\n async def uptime(self, ctx):\n await ctx.send(f'Uptime: {get_uptime()}')\n\n \n #############################\n # game command\n #############################\n @commands.command(help=\"$game [Name of Game]\")\n async def game(self, ctx, *arg):\n # If no game is entered. Empty tuple\n if not arg:\n time.sleep(.5)\n async with ctx.typing():\n await asyncio.sleep(random.uniform(.5, 2))\n await ctx.send(f\"You didn't specify a game...\")\n\n else:\n gamename = ''\n for i in arg:\n gamename += i + \" \"\n \n time.sleep(.5)\n async with ctx.typing():\n await asyncio.sleep(random.uniform(.5, 2))\n\n await ctx.send(f'Now playing {gamename}')\n await self.client.change_presence(activity=discord.Game(name=gamename))\n log_game(gamename, str(ctx.author))\n\n\n #############################\n # dice command\n #############################\n @commands.command(help='$dice 3d18 (Rolls 3 dice with 18 sides)')\n async def dice(self, ctx, *, arg):\n\n arg = arg.lower()\n\n if 'd' in arg:\n dice_params_lst = arg.split('d')\n num_of_dice = int(dice_params_lst[0])\n num_of_sides = int(dice_params_lst[1])\n\n if num_of_dice <= 0 or num_of_sides <= 0:\n async with ctx.typing():\n await asyncio.sleep(random.uniform(.5, 2))\n await ctx.send(f'```{arg}``` is not valid. 
You must use positive integers.')\n                return\n\n            roll_lst = []\n            for _ in range(num_of_dice):\n                roll_lst.append(random.randint(1, num_of_sides))\n\n            embed = discord.Embed(\n                description=f\":game_die: {arg} {roll_lst}\", color=0xff0000)\n            await ctx.send(embed=embed)\n        else:\n            async with ctx.typing():\n                await asyncio.sleep(random.uniform(.5, 2))\n            await ctx.send(f'\"{arg}\" is not valid syntax.')\n            await ctx.send('To roll 3 dice with 18 sides:\\n```$dice 3d18```')\n\n\n    # on_error\n    @dice.error\n    async def dice_handler(self, ctx, error):\n        time.sleep(.5)\n        async with ctx.typing():\n            await asyncio.sleep(random.uniform(.5, 2))\n        await ctx.send(\"Invalid syntax.\")\n        await ctx.send('To roll 3 dice with 18 sides:\\n```$dice 3d18```')\n\n\n    #############################\n    # die command\n    #############################\n    @commands.command(help='$die (This automatically rolls a 1d6)')\n    async def die(self, ctx):\n        roll = random.randint(1, 6)\n        embed = discord.Embed(\n            description=f\":game_die: 1d6 [{roll}]\", color=0xff0000)\n        await ctx.send(embed=embed)\n        # await ctx.send(f'```1d6 [{roll}]```')\n\n\n    #############################\n    # Bread count command\n    #############################\n    @commands.command(help='$bread (Lists bread stats)')\n    async def bread(self, ctx):\n        bread = '\\U0001f35e'\n\n        # Get bread data\n        with open(os.path.join(dir_path, 'breads.json'), 'r') as f:\n            bread_dict = json.load(f)\n\n        # Create embed\n        embed = discord.Embed(\n            title='Bread Stats!',\n            url='https://discord.com/assets/b4145d2678321d6d3376f1c88604fd42.svg',\n            icon_url='https://images.emojiterra.com/twitter/v13.0/512px/1f35e.png',\n            description=f'Shows number of times users have been blessed with {bread} by The MagicConchShell.\\n------------------------',\n            color=0xff0000\n        )\n        embed.set_thumbnail(url='https://images.emojiterra.com/twitter/v13.0/512px/1f35e.png')\n        # embed.set_author(name=\"MagicConchShell\", url='https://github.com/AnxiousCrow/MagicConchShell', icon_url='https://cdn.discordapp.com/avatars/754734227651690526/3da3291519ea3abc53cb2409e8686c67.png?size=256')\n\n        bread_dict_sorted = dict(sorted(bread_dict.items(), key=lambda item: item[1], reverse=True))\n        for username, num in bread_dict_sorted.items():\n            # username = username[:-5]\n            # if num == 1:\n            embed.add_field(name=f'{username}', value=f'{num} {bread}', inline=True)\n            # else:\n            #     embed.add_field(name=f'{username}', value=f'{num} {bread}', inline=True)\n\n        await ctx.send(embed=embed)\n\n\ndef setup(client):\n    client.add_cog(Utilities(client))\n","repo_name":"AnxiousCrow/MagicConchShell_v2","sub_path":"cogs/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":5556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17884090490","text":"#!/usr/bin/python\n#gzip 파일 읽기 \n#gzip으로 압축 후 파일명이 gz이면 바로 읽히지 않음 \nimport gzip\n\nfile_name = \"covid19.fasta.gz\"\ndata = dict()  #data = {}\n\n\n#with gzip.open(file_name,\"rb\") as handle: 라면 바이너리 파일이라 이상하게 나옴그래서 \nwith gzip.open(file_name, 'rt') as handle:\n    for line in handle:\n        #line = line.decode(\"utf-8\")\n        if line.startswith(\">\"):\n            continue\n        for base in line.strip():\n            if base not in data:\n                data[base] = 0\n            data[base] += 1\n\nprint(data)\n'''\n#startswith (\">\") \n
딕션너리가 빈거니까 안들어가 있음 \ndata[base] =0이 실행됨 그리고 그다음 data[base] +=1은 이미 들어가있는건 딕션너리에 밸류값 이 카운트되서 올라감 \n\n'''\n\n","repo_name":"CHOIsunhyeon/bioinfo-lecture-2021-07","sub_path":"src2/covid19_1.py","file_name":"covid19_1.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23077894247","text":"#2293 동전 1\nn,k=map(int,input().split())\ncoin=[]\nfor _ in range(n):\n v=int(input())\n coin.append(v)\n\ndp=[0]*(k+1)\ndp[0]=1 #합이 0원이 되는 경우의 수\nfor i in range(n):\n for j in range(coin[i],k+1):\n dp[j]+=dp[j-coin[i]]\nprint(dp[k])","repo_name":"gayoungee/gy-algorithm","sub_path":"pythonProject/DP/2293.py","file_name":"2293.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30651193816","text":"import streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom wordcloud import WordCloud\n\nif st.checkbox('¿Qué es Coursera?'):\n st.write('Es una plataforma educativa virtual que ofrece cursos diversos en temáticas disponibles en distintos idiomas y desarrollada por la Universidad de Stanford.')\n\n#carga de datos\ncoursera_courses=pd.read_csv('Datasets/df_cc.csv')\ndf_cc=pd.DataFrame(coursera_courses) \n\ncoursera_reviews=pd.read_csv('Datasets/df_cr.csv')\ndf_cr=pd.DataFrame(coursera_reviews)\n\n#Presentación datos\nif st.checkbox('Datos en tabla'):\n st.dataframe(df_cc)\n if st.checkbox('Vista de los primeras ó últimas 5 filas'):\n if st.button('Mostrar primeras 5 filas'):\n st.write(df_cc.head())\n if st.button('Mostrar últimas 5 filas'):\n st.write(df_cc.tail())\n\n#nube de palabras\nif st.checkbox('Nube de palabras'):\n if st.button('Títulos'):\n all_titles_cc = ' '.join(df_cc['name'])\n wordcloud = WordCloud(width=800, height=400, max_font_size=150, random_state=42).generate(all_titles_cc)\n\n # Configura y muestra la figura\n fig=plt.figure(figsize=(10, 6))\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis('off')\n st.pyplot(fig)\n st.write('Conclusión de Nube de Palabras de Títulos Coursera: Parece ser que existe una mezcla de palabras más comunes, aquellas de índole de las ciencias computaciones tales como JavaScript, Web Development, HTML y otras de música como Piano, Guitarra. También resulta relevante la importante de la cursos introductorios y de entrenamiento para llamar la atención. El título es el primer acercamiento del posible consumidor con el producto.')\n\n if st.button('Instituciones'):\n all_institution_cc = ' '.join(df_cc['institution'])\n wordcloud = WordCloud(width=800, height=400, max_font_size=150, random_state=42).generate(all_institution_cc)\n\n # Configura y muestra la figura\n fig=plt.figure(figsize=(10, 6))\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis('off')\n st.pyplot(fig)\n st.write('Conclusiones Nube de palabras Instituciones de Coursera: Las universidades estadounidenses parecen liderar la oferta de cursos brindados, Duke University conocida por su enfoque de negocios, Business School, University of Pennsylvania son algunas de las más enfocadas en el ámbito empresarial. También llama la atención la presencia de Google Cloud y de IBM dos empresa que han ofertados cursos en el área de informática. 
')\n\nif st.checkbox('Gráficos de Relaciones y Frecuencia de variables'):\n if st.button('Top 5 cursos por número de calificaciones brindadas'):\n\n # Realizar el merge entre df_cr y df_cc\n merged_df = df_cr.merge(df_cc, on='course_id')\n\n # Contar el número de ocurrencias de cada course_id\n course_counts = merged_df['course_id'].value_counts()\n\n # Seleccionar los 10 cursos con el mayor número de ocurrencias\n top_5_courses = course_counts.head(5)\n\n # Crear el gráfico de barras\n fig=plt.figure(figsize=(12, 6))\n sns.barplot(x=top_5_courses.index, y=top_5_courses.values)\n\n # Título del gráfico\n plt.title('Los 5 Cursos con mayor número de calificaciones')\n\n # Etiquetas del eje x\n plt.xlabel('Course ID')\n\n # Etiquetas del eje y\n plt.ylabel('Número de Calificaciones en Coursera')\n\n # Mostrar el gráfico\n st.pyplot(fig)\n st.write('Conclusiones Diagrama de Columnas de Los cursos con mayor número de calificaciones de Coursera: Se observa la presencia predominante de tópicos enfocados en TI como python, python-data, machine-learning y deep learning. Con base en lo anterior, ante la falta de una variable subscriptores en Coursera y entendiendose como aquellas personas que calificaron el curso sean únicamente consumidores del producto, podemos aseverar que los cursos TI en Coursera tienen una mayor demanda en comparación a otros tópicos. ')\n \n if st.button('Top 5 cursos por Rating (Mínimo de calificaciones obtenidas de 1071)'): \n # Filtrar los cursos con 'num_ratings' mayor a 1071\n filtered_courses = df_cc[df_cc['num_ratings'] > 1071]\n\n # Ordenar los cursos filtrados por 'media_rating' de manera descendente\n top_10_courses = filtered_courses.nlargest(10, 'media_rating')\n\n # Crear el gráfico de columnas\n fig=plt.figure(figsize=(12, 6))\n sns.barplot(x='media_rating', y='course_id', data=top_10_courses)\n\n # Título del gráfico\n plt.title('Los 10 Mejores Cursos con el Rating más Alto (Número de Calificaciones mínima de 1071)')\n\n # Mostrar el gráfico\n st.pyplot(fig)\n st.write('Conclusiones Diagrama de Barras de los cursos con mayor rating que tienen al menos 1071 número de calificaciones: Se observa una predominancia de 2 áreas TI observable con el tópico build-a-computer y programming-language, al igual que otros cursos enfocados en humanides como painting, educación, introclassicmusic. NOTA: La condición fue establecida para poder filtrar aquellos que tuvieran un mínimo de 1071 calificaciones, este número representa la mediana del número de calificaciones. ')\n\nif st.checkbox('Conclusión de catálogo Coursera'):\n st.write('Coursera provee de 3 caracteríticas muy relevantes, el número de calificaciones obtenidas, rating y las instituciones que los brindan. Las 2 primeras pueden ser utilizadas para trazar una línea de tendencia a encontrar la popularidad de centros educativos para poder cotizar aquellos cursos que son más populares y estén disponibles de ellos. Cabe resaltar que la falta de temática complico el análisis por otras áreas como subject o nivel. 
')\n","repo_name":"RCastroPeraza/Interactive_Dashboard_Data_Analysis_MOOCs","sub_path":"pages/02_📊_Graficos_Coursera.py","file_name":"02_📊_Graficos_Coursera.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72008418185","text":"import sys\nimport os\nimport numpy as np\nimport json\nfrom skimage import io\nfrom .model import load_model\nfrom .conf import config\n\n\ndef get_img_path() -> str:\n\t'''\n\t\tReturn the absolute path of the image to predict \n\t'''\n\treturn \tos.path.join(\n\t\tconfig[\"data_to_predict_path\"], \n\t\tsys.argv[1]\n\t)\n\n\ndef get_img() -> np.ndarray:\n\t'''\n\t\tReturn the image as a np.ndarray \n\t'''\n\treturn io.imread(\n\t\tget_img_path()\n\t)\n\n\ndef save_prediction(image_name: str, pred: str):\n\t'''\n\t\tParameters: \n\t\t\timage_name (str): name of the file of the prediction\n\t\t\tpred (str): the predicted class for the image\n\t'''\n\tpred_info = {\n\t\tf\"{image_name}\": f\"{pred}\"\n\t}\n\n\twith open(config[\"prediction\"], 'w+') as file:\n\t\tjson.dump(pred_info, file)\n\n\nif __name__ == \"__main__\":\n\tmodel = load_model()\n\timg = get_img()\n\tpred = model.predict(np.reshape(img, (1, 28, 28)))[0]\n\tsave_prediction(sys.argv[1], pred)\n","repo_name":"carl2g/deep_move","sub_path":"deep_move/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10496497761","text":"import os, sys\nfrom dataformatter import DataFormatter\nfrom pyQueryConstructor import QueryConstructor\nfrom edeconnector import Connector\nfrom edeconfig import readConf\n\n\nif __name__ == '__main__':\n dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')\n prometheus_endpoint = '194.102.62.155'\n # prometheus_endpoint = '10.9.8.136'\n # prometheus_endpoint = 'hal720m.sage.ieat.ro'\n prometheus_port = '9090'\n print(\"Collecting data from Monitoring at: {}\".format(prometheus_endpoint))\n prometheus_query = {\"query\": '''{__name__=~\"node.+\"}[65m]'''}\n # prometheus_query = qContructor.pr_query_node(time=\"1h\")\n edeConnector = Connector(prEndpoint=prometheus_endpoint, MInstancePort=prometheus_port)\n\n # test0 = edeConnector.pr_health_check()\n # print(test0)\n test = edeConnector.pr_targets()\n print(\"Current target information:\")\n print(test)\n test1 = edeConnector.pr_labels('cpu')\n print(test1)\n test2 = edeConnector.pr_status()\n print(\"Status information\")\n print(test2)\n print(\"Executing query ....\")\n test3 = edeConnector.pr_query(query=prometheus_query)\n # print(test1['data']['result'])\n dformat = DataFormatter(dataDir)\n print(\"Query completed ....\")\n print(\"Saving ...\")\n test_format = dformat.prtoDF(test3, checkpoint=True, verbose=True)\n print(\"Saved\")\n\n # test4 = edeConnector.localData('ede_data_exp.csv')\n\n\n\n # #Standard query values\n # # qte = 1475842980000\n # # qlte = 1475845200000\n # qgte = 1521390795179\n # qlte = 1521477195179\n # qsize = 0\n # qinterval = \"10s\"\n #\n #\n # dmonConnector = Connector('85.120.206.59')\n # qConstructor = QueryConstructor(queryDir='/Users/Gabriel/Documents/workspaces/diceWorkspace/dmon-adp/queries')\n # dformat = DataFormatter(dataDir)\n #\n # test = dmonConnector.clusterHealth()\n # test2 = dmonConnector.clusterSettings()\n # test3 = dmonConnector.clusterState()\n # test4 = dmonConnector.nodeInfo()\n # test5 = dmonConnector.nodeState()\n # 
test6 = dmonConnector.getIndex('logstash-*')\n # test7 = dmonConnector.getIndexSettings('logstash-*')\n #\n # # body = {\n # # 'timestamp': datetime.utcnow(),\n # # 'anomaly': 'complex',\n # # 'host': '10.0.0.0'\n # # }\n # #\n # # test8 = dmonConnector.pushAnomaly('testme', doc_type='d', body=body)\n #\n # print(test)\n # print(test2)\n # print(test3)\n # print(test4)\n # print(test5)\n # print(test6)\n # print(test7)\n # # print test8\n #\n # nodes = ['dice.cdh.master', 'dice.cdh.slave1', 'dice.cdh.slave2', 'dice.cdh.slave3']\n # checkpoint = True\n # lload = []\n # lmemory = []\n # linterface = []\n # lpack = []\n # for node in nodes:\n # load, load_file = qConstructor.loadString(node)\n # memory, memory_file = qConstructor.memoryString(node)\n # interface, interface_file = qConstructor.interfaceString(node)\n # packet, packet_file = qConstructor.packetString(node)\n #\n # # Queries\n # qload = qConstructor.systemLoadQuery(load, qgte, qlte, qsize, qinterval)\n # qmemory = qConstructor.systemMemoryQuery(memory, qgte, qlte, qsize, qinterval)\n # qinterface = qConstructor.systemInterfaceQuery(interface, qgte, qlte, qsize, qinterval)\n # qpacket = qConstructor.systemInterfaceQuery(packet, qgte, qlte, qsize, qinterval)\n #\n # # Execute query and convert response to csv\n # qloadResponse = dmonConnector.aggQuery(qload)\n # gmemoryResponse = dmonConnector.aggQuery(qmemory)\n # ginterfaceResponse = dmonConnector.aggQuery(qinterface)\n # gpacketResponse = dmonConnector.aggQuery(qpacket)\n #\n # if checkpoint:\n # linterface.append(dformat.dict2csv(ginterfaceResponse, qinterface, interface_file, df=checkpoint))\n # lmemory.append(dformat.dict2csv(gmemoryResponse, qmemory, memory_file, df=checkpoint))\n # lload.append(dformat.dict2csv(qloadResponse, qload, load_file, df=checkpoint))\n # lpack.append(dformat.dict2csv(gpacketResponse, qpacket, packet_file, df=checkpoint))\n # else:\n # dformat.dict2csv(ginterfaceResponse, qinterface, interface_file)\n # dformat.dict2csv(gmemoryResponse, qmemory, memory_file)\n # dformat.dict2csv(qloadResponse, qload, load_file)\n # dformat.dict2csv(gpacketResponse, qpacket, packet_file)\n #\n # if not checkpoint:\n # dformat.chainMergeSystem()\n # # Merge system metricsall\n # merged_df = dformat.chainMergeNR()\n # dformat.df2csv(merged_df, os.path.join(dataDir, \"System.csv\"))\n # else:\n # df_interface, df_load, df_memory, df_packet = dformat.chainMergeSystem(linterface=linterface,\n # lload=lload, lmemory=lmemory,\n # lpack=lpack)\n # merged_df = dformat.chainMergeNR(interface=df_interface, memory=df_memory,\n # load=df_load, packets=df_packet)\n # merged_df.set_index('key', inplace=True)\n # merged_df.to_csv(os.path.join(dataDir, 'System_2.csv'))","repo_name":"IeAT-ASPIDE/Event-Detection-Engine","sub_path":"experiments/ede_exp/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70247549384","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport icf\nimport pandas as pd\nimport scipy.interpolate as interpolate\nfrom scipy.optimize import curve_fit\n\ndef calculate_pixels():\n \"\"\"\n delta tao = temporal resolution\n v = sweep speed = 63 ps/mm\n d = width of the slit in front of the photocathode = 0.25 mm\n M = instrument magnification = 1.24\n delta s = spatial resolution = 150 micron meter\n \"\"\"\n d = 0.25\n M = 1.24\n delta_s = 0.15 # changed to mm\n v = 63\n\n delta_tao = (np.sqrt((d*M)**2 + 
delta_s**2))/(1/v)\n # print (delta_tao)\n\n pixels_to_mm = 0.06\n\n\n number_of_pixels = 1 / (v * pixels_to_mm / delta_tao)\n # print (number_of_pixels)\n\ndef plot_the_data(X, Y, title):\n plt.plot(X, Y)\n plt.title(title)\n plt.show()\n return True\n\noriginal_xdata, original_ydata = icf.load_2col(\"../images/data/lineout_2.csv\")\n#xdata = reversed_arr = arr[::-1]\n#plot_the_data(original_xdata, original_ydata, \"original\")\nrearranged_xdata = original_xdata[::-1]\n\n\n\nenergy_transitions = pd.read_csv(\"../images/data/energy_transitions.csv\")\nenergy_transitions[\"difference_between_past_point\"] = energy_transitions[\"ev\"].diff()\n# print (energy_transitions.head(13))\nenergy_transitions = energy_transitions[energy_transitions[\"x_value\"].notna()]\nenergy_transitions = energy_transitions.drop([\"difference_between_past_point\"], axis=1)\n# print (energy_transitions[[\"ev\", \"x_value\"]])\n\npeakEnergy = energy_transitions[\"ev\"]\npeakCentre = energy_transitions[\"x_value\"]\n\n\n\n# f_fit = interpolate.interp1d(peakCentre, peakEnergy, kind=\"linear\")\n# energy_fitted = f_fit(peakCentre)\n\ndef f(x, *params):\n a = params[0]\n b = params[1]\n c = params[2]\n d = params[3]\n\n return a + x*b + c*x**2 + d*x**3\nguess = [0, 0, 0, 0]\npopt_dispersion, pcov_dispersion = curve_fit(f, peakCentre, peakEnergy, p0=guess)\nenergy_fitted = f(rearranged_xdata, *popt_dispersion)\n\n# plt.plot(peakCentre, peakEnergy, 'o', label=\"points\")\n# plt.plot(rearranged_xdata, energy_fitted, label=\"fit\")\n# plt.legend(loc=\"best\")\n# plt.xlabel(\"peackCentre\")\n# plt.ylabel(\"peackEnrgy\")\n# plt.show()\n\n# plt.plot(energy_fitted,original_ydata)\n# for peak in peakEnergy:\n# plt.axvline(peak, color='r')\n# plt.plot()\n# plt.xlabel(\"energy\")\n# plt.show()\n# y_data_shifted = f(rearranged_xdata, *popt)\n\n# plt.plot(rearranged_xdata, original_ydata, label='Data')\n# plt.plot(rearranged_xdata, y_data_shifted, label=\"Interpolation\")\n# plt.show()\n# print (\"min: {}, max: {}\".format(np.min(energy_fitted), np.max(energy_fitted)))\n\nfiles_and_interpolations = pd.DataFrame(\n {\n \"file\":[\"photocathode.dat\", \"reflectivity.dat\", \"xray1318.dat\"],\n \"function\":[\"C(E)\", \"R(E)\", \"T(E)\"]\n })\n\nenergy_spectrum_data = pd.DataFrame({\"Energy(eV)\": energy_fitted, \"spectrum\": original_ydata})\nenergy_spectrum_data = energy_spectrum_data[energy_spectrum_data[\"Energy(eV)\"] >= 3034]\n\nfor idx, row in files_and_interpolations.iterrows():\n file = row[\"file\"]\n # print (file)\n correction_data_x, correction_data_y = icf.load_2col(\"../images/corrections/\"+file)\n interpolation_func = interpolate.interp1d(correction_data_x, correction_data_y, kind=\"linear\")\n corrected_spectrum = interpolation_func(energy_spectrum_data[\"Energy(eV)\"])\n energy_spectrum_data[row[\"function\"]] = corrected_spectrum\n\n# plt.plot(energy_spectrum_data[\"Energy(eV)\"], corrected_spectrum, label=row[\"function\"])\n\n# print (energy_spectrum_data)\n# plt.legend(loc=\"best\")\n# plt.show()\n\nenergy_spectrum_data[\"corrections_combined\"] = energy_spectrum_data[\"C(E)\"] * energy_spectrum_data[\"R(E)\"] * energy_spectrum_data[\"T(E)\"]\n\nenergy_spectrum_data[\"corrected_wavelength\"] = energy_spectrum_data[\"spectrum\"] / energy_spectrum_data[\"corrections_combined\"]\n\n# plt.plot(energy_spectrum_data[\"Energy(eV)\"], energy_spectrum_data[\"spectrum\"], label=\"original spectrum\")\n# plt.plot(energy_spectrum_data[\"Energy(eV)\"], energy_spectrum_data[\"corrected_wavelength\"], label=\"corrected 
spectrum\")\n# plt.legend(loc=\"best\")\n# plt.xlabel(\"Energy(eV)\")\n# #plt.ylabel(\"\")\n# plt.show()\n\n# fig, ax = plt.subplots()\n# ax.plot(energy_spectrum_data[\"Energy(eV)\"], energy_spectrum_data[\"spectrum\"], label=\"original spectrum\")\n# ax.legend(loc=\"best\")\n# ax.set_xlabel(\"Energy(eV)\")\n# ax2=ax.twinx()\n# ax2.plot(energy_spectrum_data[\"Energy(eV)\"], energy_spectrum_data[\"corrected_wavelength\"], label=\"corrected spectrum\", color=\"r\")\n# ax2.legend(loc=\"best\")\n# plt.show()\n\ndef gaussian(x, *params):\n\n A = params[0]\n x0 = params[1]\n c = params[2]\n\n return A*np.exp(-(x-x0)**2 / (2*c**2))\n\n\ndef multiple_gaussians(x, *params):\n y0 = params[9]\n G1 = gaussian(x, *params[0:3])\n G2 = gaussian(x, *params[3:6])\n G3 = gaussian(x, *params[6:9])\n\n return y0 + G1 + G2 + G3\n\n\nguesses = [4300, 3685, 80,\n 1800, 3875, 50,\n 3000, 3940, 60, \n 500\n]\n\n\nHe_B = 3683.7\nLy_B = 3935.6\nHe_alpha = 3139.3\n# plt.plot(energy_spectrum_data[\"Energy(eV)\"], energy_spectrum_data[\"corrected_wavelength\"], label=\"corrected spectrum\")\n# plt.axvline(He_B, color='r', label=\"He-B\")\n# plt.axvline(Ly_B, color='r', label=\"Ly-B\")\n# plt.legend(loc=\"best\")\n# plt.xlabel(\"Energy(eV)\")\n# plt.show()\n\nenergy_spectrum_data = energy_spectrum_data[energy_spectrum_data[\"Energy(eV)\"] >= 3500]\nenergy_spectrum_data = energy_spectrum_data[energy_spectrum_data[\"Energy(eV)\"] <= 4100]\nranges = [\n [0, 3400, 1, 0, 3400, 1, 0, 3400, 1, 0],\n [np.inf, 4500, 200, np.inf, 4500, 200, np.inf, 4500, 200, 2000]\n]\npopt, pcov = curve_fit(multiple_gaussians, energy_spectrum_data[\"Energy(eV)\"], energy_spectrum_data[\"corrected_wavelength\"], guesses, bounds=ranges)\nfit_data = multiple_gaussians(energy_spectrum_data[\"Energy(eV)\"], *popt)\n# print (popt, pcov)\nplt.plot(energy_spectrum_data[\"Energy(eV)\"], energy_spectrum_data[\"corrected_wavelength\"], label=\"corrected spectrum\")\nplt.plot(energy_spectrum_data[\"Energy(eV)\"], fit_data, label=\"fit\")\nplt.axvline(He_B, color='r', label=\"He-B\")\nplt.axvline(Ly_B, color='r', label=\"Ly-B\")\n# plt.axvline(He_alpha, color='r', label=\"He-alpha\")\nplt.legend(loc=\"best\")\nplt.xlabel(\"Energy(eV)\")\nplt.show()\n\ndef get_delta_x(c, n=2):\n return np.sqrt(2) * c * (np.log(2))**(1/n)\n\ndef R(source_s=100, ssc=150, film=1, dig=60):\n return np.sqrt(source_s**2 + ssc**2 + film**2 + dig**2)\n\ndef get_fwhm(C, C_cov):\n print (C, C_cov)\n C_error = np.sqrt(C_cov)\n delta_x = get_delta_x(C)\n C_error_prct = C_error / C\n delta_x_error = delta_x * C_error_prct\n diameter = delta_x * 2\n diameter_error = delta_x_error * 2\n # need to add more parameters... 
psf?\n print (\"R\" + str(R()))\n R_2_value = (R() * (200/6000))**2\n print (\"R2: \" + str(R_2_value))\n core_diameter = np.sqrt((diameter**2) - (R_2_value)) \n core_diameter_error = np.sqrt((diameter_error**2))\n print (core_diameter_error)\n return core_diameter, core_diameter_error\nprint (\"i\")\nfwhm_df = pd.DataFrame({\n \"C\": [popt[2], popt[5], popt[8]],\n \"C_cov\": [pcov[2][2], pcov[5][5], pcov[8][8]],\n})\n\nfwhm_df[\"S_FWHM\"], fwhm_df[\"S_FWHM_error\"] = get_fwhm(fwhm_df[\"C\"], fwhm_df[\"C_cov\"])\n\nprint (fwhm_df)\nfor idx, row in fwhm_df.iterrows():\n \n print (\"The value of FWHM is: {} +- {}\".format(row[\"S_FWHM\"], np.sqrt(row[\"S_FWHM\"])))\n","repo_name":"hectorjassog/uoy_icf_labs","sub_path":"lab2/code/lab_2.py","file_name":"lab_2.py","file_ext":"py","file_size_in_byte":7235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24121463606","text":"import requests\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\n# get the list of free proxies\n\n# didsoft proxies\ndef getProxiesDS(link, reqHeader=None):\n proxies = []\n r = requests.get(link)\n soup = BeautifulSoup(r.content, 'html.parser')\n table = soup.find('tbody')\n # proxies = []\n for row in table:\n if row.find_all('td')[4].text == 'elite proxy' or row.find_all('td')[4].text == 'anonymous' or row.find_all('td')[4].text == 'transparent':\n # proxy = ':'.join([row.find_all('td')[0].text,\n # row.find_all('td')[1].text])\n proxies.append([row.find_all('td')[0].text,\n row.find_all('td')[1].text])\n else:\n pass\n return proxies\n\n# proxies scraped from github repos\ndef getProxiesGH(link, reqHeader=None):\n r = requests.get(link)\n soup = BeautifulSoup(r.content, 'html.parser')\n table = soup.find('table')\n proxies = []\n if (table.find_all('tr')):\n for row in table.find_all('tr'):\n host, port = str(row.select('tr > td')[1].text).split(':')\n proxies.append([host, port])\n return proxies\n\n\n# format like: [\"ip\", \"port\"]\ndidsoftProxies = [\n \"https://free-proxy-list.net/\",\n \"https://www.us-proxy.org/\",\n \"https://www.sslproxies.org/\"\n]\n\n# format like: [\"ip\", \"port\"]\ngithubRepoProxies = [\n \"https://github.com/TheSpeedX/PROXY-List/blob/master/http.txt\",\n \"https://github.com/monosans/proxy-list/blob/main/proxies/http.txt\",\n \"https://github.com/ShiftyTR/Proxy-List/blob/master/http.txt\",\n \"https://github.com/ShiftyTR/Proxy-List/blob/master/https.txt\",\n \"https://github.com/mmpx12/proxy-list/blob/master/http.txt\",\n \"https://github.com/mmpx12/proxy-list/blob/master/https.txt\",\n \"https://github.com/zevtyardt/proxy-list/blob/main/http.txt\",\n \"https://github.com/sunny9577/proxy-scraper/blob/master/proxies.txt\",\n \"https://github.com/UptimerBot/proxy-list/blob/master/proxies/http.txt\",\n \"https://github.com/roosterkid/openproxylist/blob/main/HTTPS_RAW.txt\",\n \"https://github.com/prxchk/proxy-list/blob/main/http.txt\",\n \"https://github.com/HyperBeats/proxy-list/blob/main/http.txt\"\n]\n\nproxies = []\nfor link in githubRepoProxies:\n proxies += getProxiesGH(link)\n\nprint(proxies)\nprint(len(proxies))","repo_name":"cleeclee123/PChecker","sub_path":"PChecker/src/utils/scrape_test.py","file_name":"scrape_test.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11564640277","text":"from __future__ import division, print_function\n\nimport sys\n\nfrom optparse import OptionParser\nfrom os import close, 
remove\nfrom os.path import abspath, join, split\nfrom random import seed\nfrom re import search\nfrom tempfile import mkstemp\n\nimport numpy as np\n\nfrom Bio import AlignIO\n\nfrom idepi.alphabet import Alphabet\nfrom idepi.databuilder import DataBuilder\nfrom idepi.filter import naivefilter\n\nfrom idepi import (\n Labeler,\n Regressor,\n alignment_identify_ref,\n extract_feature_weights,\n cv_results_to_output,\n generate_alignment,\n input_data,\n is_HXB2,\n regressor_classes,\n pretty_fmt_results,\n seqrecord_get_values,\n set_util_params,\n __file__ as _idepi_file,\n __version__ as _idepi_version\n)\n\nfrom pyxval import ContinuousPerfStats, CrossValidator\n\n__VERSION__ = _idepi_version\n\n_IDEPI_PATH = split(abspath(_idepi_file))[0]\n_HXB2_DNA_FASTA = join(_IDEPI_PATH, 'data', 'hxb2_dna.fa')\n_HXB2_AMINO_FASTA = join(_IDEPI_PATH, 'data', 'hxb2_pep.fa')\n\n_PHYLOFILTER_BATCHFILE = join(_IDEPI_PATH, 'data', 'hyphy', 'CorrectForPhylogeny.bf')\n\n_DEFAULT_NUM_FEATURES = 10\n\n# strip the _TEST variables because of the beginning and trailing newlines\n_TEST_DNA_STO = '''# STOCKHOLM 1.0\n1||A|1 AUGAUUCCCGACUUUAAANNN\n2||A|21 AUGAUUCCCGACUUUAAANNNCAC\n3||A|50 AUGAUUCCCAAANNNCAC\n4||B|0.5 AUGCCCGACUUUAAACAC\ngi|9629357 AUGCCCGACUUUAAACAC\n//'''.strip()\n\n_TEST_AMINO_STO = '''# STOCKHOLM 1.0\n1||A|1 MIPDFKX-\n2||A|21 MIPDFKXH\n3||A|50 MIP--KXH\n4||B|0.5 .MPDFKH-\ngi|9629363 -MPDFKH-\n//'''.strip()\n\n_TEST_AMINO_NAMES = ['0aM', '0a[]', 'M1I', 'M1M', 'P2P', 'D3D', 'D3[]', 'F4F', 'F4[]', 'K5K', 'H6H', 'H6X', '6aH', '6a[]']\n_TEST_STANFEL_NAMES = ['0a[ACGILMPSTV]', '0a[]', 'M1[ACGILMPSTV]', 'P2[ACGILMPSTV]', 'D3[DENQ]', 'D3[]', \\\n 'F4[FWY]', 'F4[]', 'K5[HKR]', 'H6[HKR]', 'H6[X]', '6a[HKR]', '6a[]']\n\n_TEST_Y = np.array([1.,21.,50.,0.5])\n\n_TEST_AMINO_X = np.array([[1,0,1,0,1,1,0,1,0,1,0,1,0,1],\n [1,0,1,0,1,1,0,1,0,1,0,1,1,0],\n [1,0,1,0,1,0,1,0,1,1,0,1,1,0],\n [0,1,0,1,1,1,0,1,0,1,1,0,0,1]])\n\n_TEST_STANFEL_X = np.array([[1,0,1,1,1,0,1,0,1,0,1,0,1],\n [1,0,1,1,1,0,1,0,1,0,1,1,0],\n [1,0,1,1,0,1,0,1,1,0,1,1,0],\n [0,1,1,1,1,0,1,0,1,1,0,0,1]])\n\nOPTIONS = None\n\n\ndef optparse_extend(option, opt_str, value, parser):\n if getattr(parser.values, option.dest, None) is None:\n setattr(parser.values, option.dest, [])\n getattr(parser.values, option.dest).extend(value)\n\n\ndef optparse_csv(option, opt_str, value, parser):\n setattr(parser.values, option.dest, value.split(','))\n\n\ndef optparse_data(option, _, value, parser):\n setattr(parser.values, option.dest, input_data(value))\n\n\ndef setup_option_parser():\n\n parser = OptionParser(usage = '%prog [options] ANTIBODY')\n\n # option action='store' callback type nargs=1 dest\n parser.add_option('--hmmalign', type='string', dest='HMMER_ALIGN_BIN')\n parser.add_option('--hmmbuild', type='string', dest='HMMER_BUILD_BIN')\n parser.add_option('--hmmiter', type='int', dest='HMMER_ITER')\n parser.add_option('--method', type='string', dest='REGRESSOR_METHOD')\n parser.add_option('--filter', action='callback', callback=optparse_csv, type='string', dest='FILTER')\n parser.add_option('--clonal', action='store_true', dest='CLONAL')\n parser.add_option('--numfeats', type='int', dest='NUM_FEATURES')\n parser.add_option('--subtypes', action='callback', callback=optparse_csv, type='string', dest='SUBTYPES')\n parser.add_option('--weighting', action='store_true', dest='WEIGHTING')\n parser.add_option('--amino', action='store_true', dest='AMINO')\n parser.add_option('--dna', action='store_true', dest='DNA')\n parser.add_option('--stanfel', 
action='store_true', dest='STANFEL')\n parser.add_option('--cv', type='int', dest='CV_FOLDS')\n parser.add_option('--loocv', action='store_true', dest='LOOCV')\n parser.add_option('--maxcon', type='float', dest='MAX_CONSERVATION')\n parser.add_option('--maxgap', type='float', dest='MAX_GAP_RATIO')\n parser.add_option('--mincon', type='float', dest='MIN_CONSERVATION')\n parser.add_option('--neuts', action='callback', callback=optparse_data, type='string', dest='DATA')\n parser.add_option('--hxb2', type='string', dest='REFSEQ_FASTA')\n parser.add_option('--ids', action='callback', callback=optparse_csv, type='string', dest='HXB2_IDS')\n parser.add_option('--test', action='store_true', dest='TEST')\n parser.add_option('--seed', type='int', dest='RAND_SEED')\n parser.add_option('--phylofilt', action='store_true', dest='PHYLOFILTER')\n parser.add_option('--logspace', action='store_true', dest='LOGSPACE')\n\n parser.set_defaults(HMMER_ALIGN_BIN = 'hmmalign')\n parser.set_defaults(HMMER_BUILD_BIN = 'hmmbuild')\n parser.set_defaults(HMMER_ITER = 8)\n parser.set_defaults(REGRESSOR_METHOD = 'ridgelar')\n parser.set_defaults(FILTER = [])\n parser.set_defaults(CLONAL = False)\n parser.set_defaults(NUM_FEATURES = -1)\n parser.set_defaults(SUBTYPES = [])\n parser.set_defaults(WEIGHTING = False)\n parser.set_defaults(AMINO = False)\n parser.set_defaults(DNA = False)\n parser.set_defaults(STANFEL = False)\n parser.set_defaults(CV_FOLDS = 5)\n parser.set_defaults(LOOCV = False)\n parser.set_defaults(MAX_CONSERVATION = 1. ) # 93.)\n parser.set_defaults(MAX_GAP_RATIO = 0.1 ) # 93.)\n parser.set_defaults(MIN_CONSERVATION = 1. ) # 33.)\n parser.set_defaults(DATA = input_data(join(_IDEPI_PATH, 'data', 'allneuts.sqlite3')))\n parser.set_defaults(REFSEQ_FASTA = _HXB2_AMINO_FASTA)\n parser.set_defaults(HXB2_IDS = ['9629357', '9629363'])\n parser.set_defaults(RAND_SEED = 42) # make the behavior deterministic for now\n parser.set_defaults(PHYLOFILTER = False)\n parser.set_defaults(LOGSPACE = False)\n\n return parser\n\n\ndef run_tests():\n # set these to this so we don't exclude anything (just testing file generation and parsing)\n OPTIONS.NUM_FEATURES = 15 # should be enough, the number is known to be 13\n OPTIONS.MAXREL = False\n OPTIONS.DNA = False\n OPTIONS.MAX_CONSERVATION = 1.0\n OPTIONS.MAX_GAP_RATIO = 1.0\n OPTIONS.MIN_CONSERVATION = 1.0\n\n # if we don't do this, DOOMBUNNIES\n set_util_params(OPTIONS.HXB2_IDS)\n\n fd, sto_filename = mkstemp(); close(fd)\n\n try:\n fh = open(sto_filename, 'w')\n print(_TEST_AMINO_STO, file=fh)\n fh.close()\n\n alignment = AlignIO.read(sto_filename, 'stockholm')\n\n for OPTIONS.STANFEL in (True, False):\n\n if OPTIONS.STANFEL:\n OPTIONS.AMINO = False\n _TEST_NAMES = _TEST_STANFEL_NAMES\n _TEST_X = _TEST_STANFEL_X\n else:\n OPTIONS.AMINO = True\n _TEST_NAMES = _TEST_AMINO_NAMES\n _TEST_X = _TEST_AMINO_X\n\n alph = Alphabet(Alphabet.STANFEL if OPTIONS.STANFEL else Alphabet.DNA if OPTIONS.DNA else Alphabet.AMINO)\n\n # test mRMR and LSVM file generation\n ylabeler = Labeler(\n seqrecord_get_values,\n lambda seq: is_HXB2(seq) or False, # TODO: again filtration function\n )\n alignment, y, ic50gt = ylabeler(alignment)\n\n filter = naivefilter(\n OPTIONS.MAX_CONSERVATION,\n OPTIONS.MIN_CONSERVATION,\n OPTIONS.MAX_GAP_RATIO\n )\n refidx = alignment_identify_ref(alignment, is_HXB2)\n builder = DataBuilder(\n alignment,\n alph,\n refidx,\n filter\n )\n x = builder(alignment, refidx)\n colnames = builder.labels\n\n # test the feature names portion\n try:\n assert(len(colnames) == 
len(_TEST_NAMES))\n except AssertionError:\n raise AssertionError('gen: %s\\ntruth: %s' % (colnames, _TEST_NAMES))\n\n for name in _TEST_NAMES:\n try:\n assert(name in colnames)\n except AssertionError:\n raise AssertionError('ERROR: \\'%s\\' not found in %s' % (name, ', '.join(colnames)))\n\n assert(np.all(_TEST_X == x))\n\n assert(np.all(_TEST_Y == y))\n\n # TODO: generate and test the regressor data generation\n # print y, \"\\n\", x\n\n finally:\n remove(sto_filename)\n\n print('ALL TESTS PASS', file=sys.stderr)\n\n\ndef fix_hxb2_fasta():\n '''If DNA mode was selected but the AMINO reference sequence is still in place, fix it'''\n if OPTIONS.DNA == True and OPTIONS.REFSEQ_FASTA == _HXB2_AMINO_FASTA:\n OPTIONS.REFSEQ_FASTA = _HXB2_DNA_FASTA\n\n\ndef main(argv=sys.argv):\n global OPTIONS\n\n # so some option parsing\n option_parser = setup_option_parser()\n (OPTIONS, args) = option_parser.parse_args(argv)\n\n # do some argument parsing\n if OPTIONS.TEST:\n run_tests()\n return 0\n\n if OPTIONS.RAND_SEED is not None:\n seed(OPTIONS.RAND_SEED)\n\n if len(args) != 2:\n option_parser.error('ANTIBODY is a required argument')\n\n # check to make sure our mode is exclusive, and set the default (AMINO) if none is set\n if sum([1 for v in (OPTIONS.AMINO, OPTIONS.DNA, OPTIONS.STANFEL) if v]) > 1:\n option_parser.error('options --amino, --dna, and --stanfel are mutually exclusive')\n elif sum([1 for v in (OPTIONS.AMINO, OPTIONS.DNA, OPTIONS.STANFEL) if v]) == 0:\n OPTIONS.AMINO = True\n\n # validate the regression method\n cvopts = {}\n if OPTIONS.REGRESSOR_METHOD in regressor_classes:\n cvopts['regressorcls'] = regressor_classes[OPTIONS.REGRESSOR_METHOD]\n else:\n option_parser.error('%s not in the list of available regression methods: \\n %s' % (OPTIONS.REGRESSOR_METHOD,\n '\\n '.join(regressor_classes.keys())))\n\n if search(r'(?:lar|lasso)$', OPTIONS.REGRESSOR_METHOD):\n if OPTIONS.NUM_FEATURES < 0:\n OPTIONS.NUM_FEATURES = _DEFAULT_NUM_FEATURES\n cvopts['m'] = OPTIONS.NUM_FEATURES\n elif OPTIONS.NUM_FEATURES > 0:\n option_parser.error('--numfeats is a useless parameter for regression method `%s\\'' % OPTIONS.REGRESSOR_METHOD)\n\n cvopts['logspace'] = OPTIONS.LOGSPACE\n\n # validate the antibody argument, currently a hack exists to make PG9/PG16 work\n # TODO: Fix pg9/16 hax\n antibody = args[1].strip()\n valid_antibodies = sorted(OPTIONS.DATA.antibodies, key=lambda x: x.strip())\n if antibody not in valid_antibodies:\n if ' ' + antibody not in valid_antibodies:\n option_parser.error('%s not in the list of permitted antibodies: \\n %s' % (antibody, '\\n '.join([ab.strip() for ab in valid_antibodies])))\n else:\n antibody = ' ' + antibody\n\n # validate the subtype option\n valid_subtypes = sorted(OPTIONS.DATA.subtypes, key=lambda x: x.strip().upper())\n for subtype in OPTIONS.SUBTYPES:\n if subtype not in valid_subtypes:\n option_parser.error('%s not in the list of permitted subtypes: \\n %s' % (subtype, '\\n '.join([st.strip() for st in valid_subtypes])))\n\n if len(OPTIONS.FILTER) != 0:\n if OPTIONS.NUM_FEATURES != -1:\n option_parser.error('--filter and --numfeats are incompatible options')\n else:\n OPTIONS.NUM_FEATURES = len(OPTIONS.FILTER)\n else: # len(OPTIONS.FILTER) == 0\n if OPTIONS.NUM_FEATURES == -1:\n OPTIONS.NUM_FEATURES = _DEFAULT_NUM_FEATURES\n\n # destroy the parser because optparse docs recommend it\n option_parser.destroy()\n\n # use the default DNA HXB2 Reference seq if we define --dna but don't give a new default HXB2 Reference seq\n fix_hxb2_fasta()\n\n # set the util 
params\n    set_util_params(OPTIONS.HXB2_IDS)\n\n    # fetch the alphabet, we'll probably need it later\n    alph = Alphabet(mode=Alphabet.STANFEL if OPTIONS.STANFEL else Alphabet.DNA if OPTIONS.DNA else Alphabet.AMINO)\n\n    ab_basename = ''.join((\n        antibody,\n        '_dna' if OPTIONS.DNA else '_amino',\n        '_clonal' if OPTIONS.CLONAL else ''\n    ))\n    alignment_basename = '_'.join((\n        ab_basename,\n        OPTIONS.DATA.basename_root,\n        __VERSION__\n    ))\n\n    # grab the relevant antibody from the SQLITE3 data\n    # format as SeqRecord so we can output as FASTA\n    # and generate an alignment using HMMER if it doesn't already exist\n    seqrecords, clonal = OPTIONS.DATA.seqrecords(antibody, OPTIONS.CLONAL, OPTIONS.DNA)\n\n    # if clonal isn't supported, fallback to default\n    if clonal != OPTIONS.CLONAL:\n        ab_basename = ''.join(ab_basename.rsplit('_clonal', 1))\n        alignment_basename = ''.join(alignment_basename.rsplit('_clonal', 1))\n\n    sto_filename = alignment_basename + '.sto'\n\n    alignment = generate_alignment(seqrecords, sto_filename, is_HXB2, OPTIONS)[0]\n\n    ylabeler = Labeler(\n        seqrecord_get_values,\n        lambda seq: is_HXB2(seq) or False, # TODO: again filtration function\n    )\n    alignment, y, ic50gt = ylabeler(alignment)\n\n    filter = naivefilter(\n        OPTIONS.MAX_CONSERVATION,\n        OPTIONS.MIN_CONSERVATION,\n        OPTIONS.MAX_GAP_RATIO,\n    )\n    refidx = alignment_identify_ref(alignment, is_HXB2)\n    builder = DataBuilder(\n        alignment,\n        alph,\n        refidx,\n        filter\n    )\n    x = builder(alignment, refidx)\n    colnames = builder.labels\n\n    crossvalidator = CrossValidator(\n        classifier_cls=Regressor,\n        folds=OPTIONS.CV_FOLDS,\n        classifier_kwargs=cvopts,\n        scorer_cls=ContinuousPerfStats,\n        scorer_kwargs={}\n    )\n\n    results = crossvalidator.crossvalidate(x, y, classifier_kwargs={}, extra=extract_feature_weights)\n\n    ret = cv_results_to_output(results, colnames)\n\n    print(pretty_fmt_results(ret))\n\n# mean_len = max([len('%.3f' % v.mu) for v in avg_stats.values()])\n# std_len = max([len('%.3f' % v.sigma) for v in avg_stats.values()])\n# std_len = int(log10(max([1.] 
+ [v.sigma for v in avg_stats.values()]))) + 5\n# for k, v in sorted(avg_stats.items(), key = lambda x: x[0][0]):\n# v_str = u'= %*.3f \\xb1 %*.3f' % (mean_len, v.mu, std_len, v.sigma)\n# print(u' %s%s' % (k, v_str))\n#\n# for k, v in avg_weights.items():\n# if abs(v.mu) < 0.0001 and v.sigma == 0.:\n# del avg_weights[k]\n#\n# print('\\nSignificant positions (top %d):' % (len(avg_weights)))\n#\n# if len(avg_weights) > 0:\n# name_len = max(len(k) for k in avg_weights.keys())\n# mean_len = max(len('% .1f' % v.mu) for v in avg_weights.values())\n# std_len = max(len('%.1f' % v.sigma) for v in avg_weights.values())\n# N_len = max(len('%d' % len(v.values)) for v in avg_weights.values())\n# for k, v in sorted(avg_weights.items(), key=lambda x: int(sub(r'[a-zA-Z\\[\\]]+', '', x[0]))):\n# print(u' %-*s % *.1f \\xb1 %*.1f (N = %*d)' % (name_len, k, mean_len, v.mu, std_len, v.sigma, N_len, len(v.values)))\n#\n# print('\\n')\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"nlhepler/idepi","sub_path":"idepi/scripts/_regressor.py","file_name":"_regressor.py","file_ext":"py","file_size_in_byte":16738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32628626265","text":"import click\nfrom collections import defaultdict\nimport json\nimport ntpath\nimport struct\nimport sys\n\nimport lief\n\n#flags and constants\nDEBUG = False\n#DEBUG = True\nVERBOSE = True\nimport_blacklist = [\n# \"ntdll.dll\",\n# \"wow64cpu.dll\",\n# \"wow64.dll\",\n# \"wow64win.dll\",\n# \"kernelbase.dll\",\n]\n\n_nc = lambda path: ntpath.normcase(path)\nbuf_to_uint32 = lambda buf: struct.unpack(\"I\", bytes(buf))[0]\nuint32_to_buf = lambda n: struct.pack(\"I\", n)\n\n\ndef debug_print(msg):\n if DEBUG:\n print(msg)\n\n\ndef verbose_print(msg):\n if VERBOSE:\n print(msg)\n\n\ndef parse_volatility_json(fn):\n with open(fn, 'r') as fd:\n res = json.load(fd)\n vads = [dict(zip(res['columns'], r)) for r in res['rows']]\n return (vads, res)\n\n\ndef parse_impscan_json(fn):\n is_list, _ = parse_volatility_json(fn)\n is_rva = defaultdict(lambda: int(0x10000))\n is_lookup = defaultdict(dict)\n for entry in is_list:\n lib = _nc(entry['Module'])\n if lib in import_blacklist:\n continue\n func = entry['Function']\n iat = entry['IAT'] & 0xffff\n if iat < is_rva[lib]:\n is_rva[lib] = iat\n is_lookup[lib][func] = iat\n obj = lambda: None\n obj.rva = is_rva\n obj.lookup = is_lookup\n obj.raw = is_list\n return obj\n\n\ndef fix_oep(_binary, oep):\n try:\n new_oep = int(oep, 16)\n except ValueError:\n new_oep = int(oep)\n old_oep = int(_binary.optional_header.addressof_entrypoint)\n verbose_print(\n \"old_oep={} new_oep={}\".format(hex(old_oep), hex(new_oep))\n )\n _binary.optional_header.addressof_entrypoint = new_oep\n\n\ndef add_new_imports(_binary, _new):\n #copy _new since we are popping items\n _new = dict(_new)\n #first, add new functions for existing libraries\n for lib in _binary.imports:\n lib_name = _nc(lib.name)\n if lib_name not in _new:\n continue\n for new_func in _new.pop(lib_name):\n debug_print(\n \"add_entry: lib={} func={}\".format(lib_name, new_func)\n )\n lib.add_entry(new_func)\n #second, add new libraries and their new functions\n for lib_name in list(_new.keys()):\n debug_print(\n \"add_library: lib={}\".format(lib_name)\n )\n lib = _binary.add_library(lib_name)\n for new_func in _new.pop(lib_name):\n debug_print(\n \"add_entry: lib={} func={}\".format(lib_name, new_func)\n )\n lib.add_entry(new_func)\n if len(_new):\n print(\"warning: 
there are left over libraries after adding new imports\")\n for lib_name in _new:\n print(\"warning: left over library name={}\".format(lib_name))\n\n\ndef get_virtual_memory_size(_binary):\n min_offset = sys.maxsize\n total_size = 0\n for sec in _binary.sections:\n if sec.virtual_address < min_offset:\n min_offset = sec.virtual_address\n total_size += sec.virtual_size\n total_size += min_offset\n return total_size\n\n\ndef align(vaddr, page_size=4096):\n \"\"\"page align an address\"\"\"\n slack = vaddr % page_size\n pad = page_size - slack\n aligned_vaddr = vaddr + pad\n return aligned_vaddr\n\n\ndef alignments(value, multiple_of):\n \"\"\"align an address with a section alignment\"\"\"\n if value <= multiple_of:\n return multiple_of\n c = 1\n while value > multiple_of * c:\n c += 1\n return multiple_of * c\n\n\ndef fix_section(section, next_section_vaddr):\n section.sizeof_raw_data = next_section_vaddr - section.virtual_address\n section.pointerto_raw_data = section.virtual_address\n section.virtual_size = section.sizeof_raw_data \n\n\ndef fix_sections(sections, virtualmemorysize):\n num_sections = len(sections)\n for i in range(num_sections - 1):\n curr_section = sections[i]\n next_section = sections[i + 1]\n fix_section(curr_section, next_section.virtual_address)\n # handle last section differently: we have no next section's virtual address. Thus we take the end of the image\n fix_section(sections[num_sections - 1], virtualmemorysize)\n\n\ndef restore_section_data(_binary, _bytes):\n for section in _binary.sections:\n start = section.virtual_address\n end = start + section.virtual_size\n section.content = _bytes[start:end]\n _build = lief.PE.Builder(_binary)\n _build.build_imports(False)\n _build.patch_imports(False)\n _build.build()\n return lief.parse(_build.get_build())\n\n\ndef fix_image_size(_binary, padded_size):\n sec_alignment = _binary.optional_header.section_alignment\n _binary.optional_header.sizeof_image = alignments(padded_size, sec_alignment)\n\n\ndef fix_section_mem_protections(_binary):\n #lazy strategy: make them all rwx\n rwx_flags = (\n lief.PE.SECTION_CHARACTERISTICS.MEM_READ\n | lief.PE.SECTION_CHARACTERISTICS.MEM_WRITE\n | lief.PE.SECTION_CHARACTERISTICS.MEM_EXECUTE\n )\n for sec in _binary.sections:\n sec.characteristics |= rwx_flags\n sec.characteristics &= ~lief.PE.SECTION_CHARACTERISTICS.CNT_UNINITIALIZED_DATA\n\n\ndef fix_checksum(_binary, checksum=0):\n \"\"\"\n The following are checked for validation at load time:\n all drivers\n any DLL loaded at boot time\n any DLL that is loaded into a critical Windows process\n Regular PE executables do not need a valid checksum\n \"\"\"\n _binary.optional_header.checksum = checksum\n\n\ndef fix_imagebase(_binary, base=0x400000):\n _binary.optional_header.imagebase = base\n\n\ndef fix_dll_characteristics(_binary):\n \"\"\"remove dynamic base feature to prevent relocations\"\"\"\n _binary.optional_header.remove(lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE)\n #_binary.optional_header.remove(lief.PE.DLL_CHARACTERISTICS.NX_COMPAT)\n\n\ndef build_imports(_binary):\n builder = lief.PE.Builder(_binary)\n builder.build_imports(True)\n builder.patch_imports(False)\n builder.build()\n return lief.parse(builder.get_build())\n\n\ndef save_build(_builder, new_fn):\n verbose_print(\"saving new pe: file={}\".format(new_fn))\n _builder.write(new_fn)\n\n\ndef remove_iat_dir(_binary):\n _iat = _binary.data_directory(lief.PE.DATA_DIRECTORY.IAT)\n _iat.rva = 0\n _iat.size = 0\n\n\ndef find_import_descriptor(lib_name, _bin):\n import_dir = 
_bin.data_directory(lief.PE.DATA_DIRECTORY.IMPORT_TABLE)\n sec = _bin.section_from_rva(import_dir.rva)\n sec_base = sec.virtual_address\n sec_bytes = bytes(sec.content)\n found_offset = None\n offset = import_dir.rva - sec_base\n descriptor_bytes = bytes(sec_bytes[offset:offset+20])\n lookup_rva, _, _, name_rva, iat_rva = struct.unpack(\"IIIII\", descriptor_bytes)\n while lookup_rva != 0x0 and name_rva != 0x0:\n name_offset = name_rva - sec_base\n if name_offset > len(sec_bytes) or name_offset <= 0x0:\n debug_print(f\"bad name_offset: 0x{name_offset:x}\")\n else:\n tmp_name = \"\"\n b = sec_bytes[name_offset]\n while b != 0x0:\n tmp_name += chr(b)\n name_offset += 1\n b = sec_bytes[name_offset]\n debug_print(f\"lib={tmp_name} offset=0x{offset:x}\")\n if _nc(tmp_name) == _nc(lib_name):\n found_offset = offset\n offset += 20\n descriptor_bytes = bytes(sec_bytes[offset:offset+20])\n (lookup_rva, _, _, name_rva, iat_rva\n )= struct.unpack(\"IIIII\", descriptor_bytes)\n if found_offset is not None:\n debug_print(f\"found_offset=0x{found_offset:x}\")\n return sec, found_offset\n\n\ndef patch_iat(_bin, _impscan):\n import_descriptor_offsets = {}\n imports_sec = None\n addr_size = 4\n for lib in _bin.imports:\n if _nc(lib.name) not in _impscan.rva:\n debug_print(f\"cannot find rva for {lib.name}\")\n continue\n il_rva = lib.import_lookup_table_rva\n il_offset = _bin.rva_to_offset(il_rva)\n il_section = _bin.section_from_offset(il_offset)\n il_bytes = il_section.content\n tb_offset = il_offset - il_section.pointerto_raw_data\n tb_ptr = int(tb_offset)\n tb_val = buf_to_uint32(bytes(il_bytes[tb_ptr:tb_ptr + addr_size]))\n for entry in lib.entries:\n #iat_val = int(entry.iat_value)\n iat_val = tb_val\n debug_print(\"{}:{}:iat_value=0x{:x}\".format(lib.name, entry.name, iat_val))\n vaddr = _impscan.lookup[_nc(lib.name)][entry.name]\n old_iat = _bin.get_content_from_virtual_address(vaddr, 4)\n old_iat = buf_to_uint32(old_iat)\n debug_print(\"vaddr=0x{:x} old_iat=0x{:x}\".format(vaddr, old_iat))\n _bin.patch_address(vaddr, iat_val, size=4)\n tb_ptr += addr_size\n tb_val = buf_to_uint32(bytes(il_bytes[tb_ptr:tb_ptr + addr_size]))\n new_rva = _impscan.rva[_nc(lib.name)]\n debug_print(\"{}:old_rva=0x{:x} new_rva=0x{:x}\".format(\n lib.name, lib.import_address_table_rva, new_rva))\n imports_sec, desc_offset = find_import_descriptor(lib.name, _bin)\n #lib.import_address_table_rva = new_rva\n #fix descriptor\n sec_bytes = list(imports_sec.content)\n buf = uint32_to_buf(new_rva)\n off = int(desc_offset) + 16\n for i, b in enumerate(buf):\n sec_bytes[off + i] = b\n imports_sec.content = sec_bytes\n\n\ndef create_ldr_map(_fn):\n ldr_raw, _ = parse_volatility_json(_fn)\n ldr_map = [_nc(l['MappedPath'])\n for l in ldr_raw]\n return ldr_map\n\n\ndef read_dll_redirects(in_fn):\n with open(in_fn, 'r') as fd:\n _win7_redirects = json.load(fd)\n return _win7_redirects\n\n\ndef create_imports_by_jump(_raw):\n _imports_by_jump = {i['IAT']: i for i in _raw}\n return _imports_by_jump\n\n\ndef get_split_jumps(_imp_by_j):\n sorted_jumps = sorted(_imp_by_j.keys())\n split_jumps = []\n last_jump = sorted_jumps[0]\n cur_jumps = [last_jump]\n for jump in sorted_jumps[1:]:\n #debug_print(f\"split_jumps: {jump}\")\n if jump == last_jump + 4:\n cur_jumps.append(jump)\n else:\n split_jumps.append(cur_jumps)\n cur_jumps = [jump]\n last_jump = jump\n split_jumps.append(cur_jumps)\n return split_jumps\n\n\ndef reconstruct_imports(_ldr_fn, _redir_fn, _impscan_obj):\n #get map of DLLs that the binary mapped when it ran\n _map = 
create_ldr_map(_ldr_fn)\n    #read master map of windows redirects\n    _redirs = read_dll_redirects(_redir_fn)\n    #map imports by IAT address\n    _imp_by_j = create_imports_by_jump(_impscan_obj.raw)\n    #sort and split up all the jumps by null-terminated gaps\n    _splits = get_split_jumps(_imp_by_j)\n\n    #resolve each jump set to a single imported library\n    chosen_so_far = []\n    _new_imports = {}\n    #each jump set is one library to import\n    for j, jset in enumerate(_splits):\n        #count how many times each library.function combo can be used\n        lib_stats = defaultdict(int)\n        #keep track of each possible lib.func combo per jump\n        funcs_in_jset = []\n        scanned_func = []\n        #each jump is one function for this library\n        for jump in jset:\n            lib_name = _imp_by_j[jump]['Module']\n            lib_bn = _nc(lib_name)\n            func = _imp_by_j[jump]['Function']\n            debug_print(f\"processing scanned function {lib_bn}.{func}\")\n            scanned_func.append((lib_bn,func))\n            slot_dict = {lib_bn: func}\n            funcs_in_jset.append(slot_dict)\n            lib_stats[lib_bn] += 1\n            if lib_bn in _redirs and func in _redirs[lib_bn]:\n                redirs = _redirs[lib_bn][func]\n                for dll_path, other_func in redirs:\n                    if dll_path in _map:\n                        path_bn = ntpath.basename(dll_path)\n                        slot_dict[path_bn] = other_func\n                        lib_stats[path_bn] += 1\n        #print(lib_stats)\n        #figure out which lib to use:\n        #\n        found_candidate = False\n        #strategy 1:\n        # there are N functions,\n        # and foo.dll is seen N times,\n        # of all libs counts, if only one of them is seen N times\n        # then it, foo.dll, must be the correct lib\n        candidates = [lib\n                      for lib, count in lib_stats.items()\n                      if count >= len(jset)]\n        if len(candidates) == 1:\n            chosen_lib = candidates[0]\n            if chosen_lib not in chosen_so_far:\n                debug_print(f\"choosing {chosen_lib}\")\n                chosen_so_far.append(chosen_lib)\n                found_candidate = True\n            else:\n                print(f\"error: strategy 1 used, \"\n                      f\"but {chosen_lib} was already chosen\")\n        else:\n            #strategy 2:\n            # just pick the last lib that hasn't been chosen yet\n            for candidate in candidates[::-1]:\n                if candidate not in chosen_so_far:\n                    chosen_lib = candidate\n                    debug_print(f\"choosing {chosen_lib}\")\n                    chosen_so_far.append(chosen_lib)\n                    found_candidate = True\n                    break\n        if not found_candidate:\n            print(lib_stats)\n            print(candidates)\n            raise RuntimeError(\"no valid candidate found\")\n        else:\n            #we found it\n            _new_imports[chosen_lib] = [slot[chosen_lib] for slot in funcs_in_jset]\n            for i, _scanned_tup in enumerate(scanned_func):\n                scanned_lib, _func = _scanned_tup\n                if scanned_lib != chosen_lib:\n                    f_rva = _impscan_obj.lookup[scanned_lib][_func]\n                    chosen_func = funcs_in_jset[i][chosen_lib]\n                    _impscan_obj.lookup[chosen_lib][chosen_func] = f_rva\n            if chosen_lib not in _impscan_obj.rva:\n                lib_jumps = _splits[j]\n                debug_print(f\"lib_jumps[{j}] = chosen:{chosen_lib} = {lib_jumps}\")\n                iat_start = min(lib_jumps) & 0xFFFF # mask vaddr to make it rva\n                debug_print(f\"iat_start for lib {chosen_lib} = {iat_start:x}\")\n                _impscan_obj.rva[chosen_lib] = iat_start\n    return _new_imports\n\n\n@click.command()\n@click.argument('pe_fn')\n@click.argument('new_pe_fn')\n@click.argument('impscan_fn')\n@click.argument('oep')\n@click.argument('ldr_fn')\n@click.argument('redirects_fn')\ndef main(pe_fn, new_pe_fn, impscan_fn, oep, ldr_fn, redirects_fn):\n    verbose_print(\"opening existing pe: file={}\".format(pe_fn))\n    with open(pe_fn, 'rb') as fd:\n        pe_bytes = list(fd.read())\n    binary = lief.parse(pe_bytes)\n\n    impscan_obj = parse_impscan_json(impscan_fn)\n    imports_to_add = reconstruct_imports(ldr_fn, redirects_fn, impscan_obj)\n    verbose_print(\n        \"found {} libraries with {} new import 
functions\".format(\n len(imports_to_add), sum([len(v) for k,v in imports_to_add.items()])\n )\n )\n\n fix_oep(binary, oep)\n virtual_size = get_virtual_memory_size(binary)\n padded_virtual_size = align(virtual_size)\n fix_sections(binary.sections, padded_virtual_size)\n binary = restore_section_data(binary, pe_bytes)\n fix_image_size(binary, padded_virtual_size)\n fix_section_mem_protections(binary)\n fix_checksum(binary)\n fix_dll_characteristics(binary)\n\n add_new_imports(binary, imports_to_add)\n binary = build_imports(binary)\n remove_iat_dir(binary)\n patch_iat(binary, impscan_obj)\n save_build(binary, new_pe_fn)\n\nif __name__ == '__main__':\n main()\n","repo_name":"carter-yagemann/vmi-unpack","sub_path":"scripts/fix_binary.py","file_name":"fix_binary.py","file_ext":"py","file_size_in_byte":15452,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"81"} +{"seq_id":"69916877705","text":"import torch\nimport torch.nn.functional as F\nfrom torch.nn.modules.loss import _WeightedLoss\n\n\nclass CasRelLoss(_WeightedLoss):\n def __init__(self, weight=None, reduction='mean'):\n super().__init__(weight=weight, reduction=reduction)\n self.weight = weight\n self.reduction = reduction\n\n @staticmethod\n def _compute_loss(gold, pred, mask):\n pred = pred.squeeze(-1)\n loss_ = F.binary_cross_entropy(pred, gold, reduction='none')\n if loss_.shape != mask.shape:\n mask = mask.unsqueeze(-1)\n loss_ = torch.sum(loss_ * mask) / torch.sum(mask)\n return loss_\n\n def forward(self, logits, inputs):\n\n pred_sub_heads, pred_sub_tails, pred_obj_heads, pred_obj_tails = logits\n\n sub_heads_loss = CasRelLoss._compute_loss(\n inputs['sub_heads'], pred_sub_heads,\n inputs['attention_mask']\n )\n sub_tails_loss = CasRelLoss._compute_loss(\n inputs['sub_tails'], pred_sub_tails,\n inputs['attention_mask']\n )\n obj_heads_loss = CasRelLoss._compute_loss(\n inputs['obj_heads'],\n pred_obj_heads,\n inputs['attention_mask']\n )\n obj_tails_loss = CasRelLoss._compute_loss(\n inputs['obj_tails'],\n pred_obj_tails,\n inputs['attention_mask']\n )\n\n loss = (sub_heads_loss + sub_tails_loss) + (obj_heads_loss + obj_tails_loss)\n\n return loss\n","repo_name":"xiangking/ark-nlp","sub_path":"ark_nlp/factory/loss_function/casrel_loss.py","file_name":"casrel_loss.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":301,"dataset":"github-code","pt":"81"} +{"seq_id":"41690773494","text":"\"\"\"Create table store.ProductCategory\n\nRevision ID: a8877159cdff\nRevises: 46f7c0d382af\nCreate Date: 2021-04-16 12:00:11.679121\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a8877159cdff'\ndown_revision = '46f7c0d382af'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table('ProductCategory',\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('name', sa.String(255), nullable=False),\n sa.Column('description', sa.String(255), nullable=True),\n schema='store')\n\n\ndef downgrade():\n op.drop_table(table_name='ProductCategory', schema='store')\n","repo_name":"ALMPartners/ahjo","sub_path":"test/samples/mssql_project/alembic/versions/20210416_12_00_11_a8877159cdff_create_table_store_productcategory.py","file_name":"20210416_12_00_11_a8877159cdff_create_table_store_productcategory.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} 
+{"seq_id":"16626642101","text":"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport math\nfrom .submodule import convbn, convbn_3d, feature_extraction, disparityregression\n\nclass hourglass(nn.Module):\n def __init__(self, inplanes, activation_mode='ReLU', upsample_mode='transpose'):\n super(hourglass, self).__init__()\n activation = nn.ReLU(inplace=True) if (activation_mode == 'ReLU') else nn.LeakyReLU(0.2, inplace=True)\n self.activation = activation\n self.upsample_mode = upsample_mode\n\n # Group 1\n self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1),\n activation)\n self.conv2 = convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1)\n\n # Group 2\n self.conv3 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1),\n activation)\n self.conv4 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1),\n activation)\n\n # Group 3\n if upsample_mode == 'transpose':\n self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),\n nn.BatchNorm3d(inplanes*2)) #+conv2\n\n self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),\n nn.BatchNorm3d(inplanes)) #+x\n elif upsample_mode == 'interpolation_conv':\n # self.conv5 = nn.Sequential(nn.Conv3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, padding=1, bias=False),\n # nn.BatchNorm3d(inplanes*2))\n self.conv5 = nn.Sequential(nn.BatchNorm3d(inplanes*2))\n self.conv6 = nn.Sequential(nn.Conv3d(inplanes*2, inplanes, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm3d(inplanes))\n # self.conv6 = nn.Sequential(nn.BatchNorm3d(inplanes))\n\n def forward(self, x, presqu, postsqu):\n out = self.conv1(x) #in:1/4 out:1/8\n pre = self.conv2(out) #in:1/8 out:1/8\n if postsqu is not None:\n pre = self.activation(pre + postsqu)\n else:\n pre = self.activation(pre)\n\n out = self.conv3(pre) #in:1/8 out:1/16\n out = self.conv4(out) #in:1/16 out:1/16\n\n if self.upsample_mode == 'transpose':\n if presqu is not None:\n post = self.activation(self.conv5(out) + presqu) #in:1/16 out:1/8\n else:\n post = self.activation(self.conv5(out) + pre)\n\n out = self.conv6(post) # in:1/8 out:1/4\n elif self.upsample_mode == 'interpolation_conv':\n d, h, w = out.shape[-3:]\n out = F.interpolate(out, (2*d, 2*h, 2*w), mode='trilinear', align_corners=True)\n if presqu is not None:\n post = self.activation(self.conv5(out) + presqu)\n else:\n post = self.activation(self.conv5(out) + pre)\n\n d, h, w = post.shape[-3:]\n post_2x = F.interpolate(post, (2*d, 2*h, 2*w), mode='trilinear', align_corners=True)\n out = self.conv6(post_2x)\n\n return out, pre, post\n\nclass PSMNet(nn.Module):\n def __init__(self, maxdisp, activation_mode='ReLU', enable_out_list=False, reduce_glass=0, cv_type=None):\n super(PSMNet, self).__init__()\n activation = nn.ReLU(inplace=True) if (activation_mode == 'ReLU') else nn.LeakyReLU(0.2, inplace=True)\n self.enable_out_list = enable_out_list\n self.maxdisp = maxdisp\n self.reduce_glass = reduce_glass\n self.cv_type = cv_type\n cv_in = 32 if (self.cv_type == 'abs_diff') else 64\n self.feature_extraction = feature_extraction(activation_mode)\n\n self.dres0 = nn.Sequential(convbn_3d(cv_in, 32, 3, 1, 1),\n activation,\n convbn_3d(32, 32, 3, 1, 1),\n activation)\n\n 
self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n activation,\n convbn_3d(32, 32, 3, 1, 1)) \n\n if reduce_glass <= 2:\n self.dres2 = hourglass(32)\n self.classif1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n activation,\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n if reduce_glass <= 1:\n self.dres3 = hourglass(32)\n self.classif2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n activation,\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n if reduce_glass <= 0:\n self.dres4 = hourglass(32)\n self.classif3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n activation,\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def forward(self, left, right, edge_weights=None):\n # Zhiwei left: (1,3,256,512)->down to 1/4, refimg_fea: (1,32,64,128)\n refimg_fea, _ = self.feature_extraction(left)\n targetimg_fea, _ = self.feature_extraction(right)\n\n #matching\n if self.cv_type == 'abs_diff':\n cost = Variable(torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1],\n self.maxdisp // 4, refimg_fea.size()[2],\n refimg_fea.size()[3]).zero_()).cuda()\n for i in range(self.maxdisp // 4):\n if i > 0:\n cost[:, :, i, :, i:] = (refimg_fea[:, :, :, i:] - targetimg_fea[:, :, :, :-i]).abs()\n else:\n cost[:, :, i, :, :] = (refimg_fea - targetimg_fea).abs()\n else:\n cost = Variable(torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1]*2,\n self.maxdisp//4, refimg_fea.size()[2],\n refimg_fea.size()[3]).zero_()).cuda()\n for i in range(self.maxdisp//4):\n if i > 0 :\n cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:,:,:,i:]\n cost[:, refimg_fea.size()[1]:, i, :, i:] = targetimg_fea[:,:,:,:-i]\n else:\n cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea\n cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea\n\n cost = cost.contiguous()\n\n # Zhiwei cost: (1,64,48,64,128)\n\n cost0 = self.dres0(cost)\n cost0 = self.dres1(cost0) + cost0\n\n if self.reduce_glass <= 2:\n out1, pre1, post1 = self.dres2(cost0, None, None)\n out1 = out1+cost0\n cost1 = self.classif1(out1)\n\n if self.reduce_glass <= 1:\n out2, pre2, post2 = self.dres3(out1, pre1, post1)\n out2 = out2+cost0\n cost2 = self.classif2(out2) + cost1\n\n if self.reduce_glass <= 0:\n out3, pre3, post3 = self.dres4(out2, pre1, post2)\n out3 = out3+cost0\n cost3 = self.classif3(out3) + cost2\n\n if self.training:\n # Zhiwei From (1,1,48,64,128)->up to 4, (1,1,192,256,512)\n if self.reduce_glass <= 2:\n cost1 = F.interpolate(cost1, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=True)\n cost1 = torch.squeeze(cost1,1)\n pred1 = F.softmax(cost1,dim=1)\n pred1 = disparityregression(self.maxdisp)(pred1)\n\n if self.reduce_glass <= 1:\n cost2 = F.interpolate(cost2, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=True)\n cost2 = torch.squeeze(cost2,1)\n pred2 = F.softmax(cost2,dim=1)\n pred2 = disparityregression(self.maxdisp)(pred2)\n\n if self.reduce_glass <= 0:\n cost3 = 
F.interpolate(cost3, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=True)\n cost3 = torch.squeeze(cost3,1)\n pred3 = F.softmax(cost3,dim=1)\n #For your information: This formulation 'softmax(c)' learned \"similarity\"\n #while 'softmax(-c)' learned 'matching cost' as mentioned in the paper.\n #However, 'c' or '-c' do not affect the performance because feature-based cost volume provided flexibility.\n pred3 = disparityregression(self.maxdisp)(pred3)\n else:\n if self.reduce_glass == 2:\n cost1 = F.interpolate(cost1, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=True)\n cost1 = torch.squeeze(cost1,1)\n pred1 = F.softmax(cost1,dim=1)\n pred1 = disparityregression(self.maxdisp)(pred1)\n elif self.reduce_glass == 1:\n cost2 = F.interpolate(cost2, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=True)\n cost2 = torch.squeeze(cost2,1)\n pred2 = F.softmax(cost2,dim=1)\n pred2 = disparityregression(self.maxdisp)(pred2)\n elif self.reduce_glass == 0:\n cost3 = F.interpolate(cost3, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear', align_corners=True)\n cost3 = torch.squeeze(cost3,1)\n pred3 = F.softmax(cost3,dim=1)\n pred3 = disparityregression(self.maxdisp)(pred3)\n\n if self.reduce_glass == 0:\n if self.training:\n if self.enable_out_list:\n return [pred1, pred2, pred3], None\n else:\n return pred1, pred2, pred3\n else:\n return pred3, None\n elif self.reduce_glass == 1:\n if self.training:\n if self.enable_out_list:\n return [pred1, pred2], None\n else:\n return pred1, pred2\n else:\n return pred2, None\n elif self.reduce_glass == 2:\n if self.training:\n if self.enable_out_list:\n return [pred1], None\n else:\n return pred1\n else:\n return pred1, None\n","repo_name":"zwxu064/RANP","sub_path":"third_party/PSM/models/stackhourglass.py","file_name":"stackhourglass.py","file_ext":"py","file_size_in_byte":11105,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"33944579473","text":"from .pg import PG as PG\nimport verilog as verilog\n\n\nclass BitwisePGLogic:\n module_name = 'BitwisePGLogic'\n\n def __init__(self, bitwidth):\n self.bitwidth = bitwidth\n\n def inputs(self):\n c0 = \"C_0\"\n As = ['A_{}'.format(ix+1) for ix in range(self.bitwidth)]\n Bs = ['B_{}'.format(ix+1) for ix in range(self.bitwidth)]\n return c0, As, Bs\n\n def outputs(self):\n Ps = ['P_{}'.format(ix) for ix in range(self.bitwidth+1)]\n Gs = ['G_{}'.format(ix) for ix in range(self.bitwidth+1)]\n return Ps, Gs\n\n def pg_inputs(self, ix):\n a = {\"port\": \"A_i\",\n \"connector\": \"A_{}\".format(ix)}\n b = {\"port\": \"B_i\",\n \"connector\": \"B_{}\".format(ix)}\n return [a, b]\n\n def pg_outputs(self, ix):\n p = {\"port\": \"P_i\",\n \"connector\": \"P_{}\".format(ix)}\n g = {\"port\": \"G_i\",\n \"connector\": \"G_{}\".format(ix)}\n return [p, g]\n\n def verilog(self, file_path, file_name):\n m = verilog.Module(BitwisePGLogic.module_name)\n\n c0, input_As, input_Bs = self.inputs()\n output_Ps, output_Gs = self.outputs()\n\n for bit in range(self.bitwidth+1):\n # Comment\n m.comment('Bit {}'.format(bit))\n if bit == 0:\n m.stmt_assign(\"P_0\", \"0\")\n m.stmt_assign(\"G_0\", \"C_0\")\n\n else:\n # Instantiation\n _pg_inputs = self.pg_inputs(bit)\n _pg_outputs = self.pg_outputs(bit)\n new_pg = PG().instantiation(instance_name=\"PG_Bit_{}\".format(bit),\n inputs=_pg_inputs, outputs=_pg_outputs)\n\n # Add Instruction\n m.instruction(new_pg)\n\n for (a, b) in 
zip(input_As, input_Bs):\n m.input(a, 'input')\n m.input(b, 'input')\n m.input(c0, 'input')\n\n for (p, g) in zip(output_Ps, output_Gs):\n m.output(p, 'output')\n m.output(g, 'output')\n\n m.start()\n\n m.end()\n\n m.write(file_path, file_name)\n\n def instantiation(self, instance_name, inputs, outputs):\n \"\"\"\n inputs: dict{ port: ? , connector: ?}\n outputs: dict{ port: ? , connector: ?}\n \"\"\"\n return verilog.Module.instantiate(module_name=BitwisePGLogic.module_name,\n instance_name=instance_name,\n inputs=inputs,\n outputs=outputs)\n","repo_name":"ahedayat/Brent-Kung-Adder","sub_path":"adders/brentkung/bitwise_pg_logic.py","file_name":"bitwise_pg_logic.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25638674716","text":"import datetime\nimport json\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.db.models import Sum\nfrom django.forms.formsets import formset_factory\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.urlresolvers import reverse, reverse_lazy\n\nfrom django.views.generic import DetailView, ListView, TemplateView\nfrom django.views.generic.edit import FormView, CreateView\n\nfrom budget.forms import (BudgetForm, BudgetCategoryForm, TransactionForm,\n BudgetCategoryFormSet, RecurringTransactionForm, CategoryForm,\n Category\n )\nfrom budget.models import (Transaction, Budget, BudgetCategory,\n RecurringTransactionDef\n )\n\ndef delete_budget(request, pk):\n budget = get_object_or_404(Budget, pk=pk)\n month = budget.month\n year = budget.year\n budget.delete()\n messages.success(\n request,\n 'Budget for %s, %s successfully deleted' % (month, year)\n )\n return HttpResponseRedirect(reverse('budgets'))\n\ndef deactivate_recurring_transaction_def(request, pk):\n '''\n sets recurring transaction def to inactive. 
If we delete it, then the \n    transactions created will be deleted in the cascade.\n    '''\n    recurring_def = get_object_or_404(RecurringTransactionDef, pk=pk)\n    recurring_def.active = False\n    recurring_def.save()\n    messages.success(\n        request,\n        \"Recurring transaction successfully deleted!\"\n    )\n    return HttpResponseRedirect(reverse('recurring_transactions'))\n\ndef delete_transaction(request):\n    transaction_id = request.POST.get('transaction_id')\n    # import ipdb; ipdb.set_trace()\n    transaction = get_object_or_404(Transaction, pk=transaction_id)\n    transaction_type = transaction.transaction_type\n    amount = transaction.amount\n    category = transaction.category\n    transaction.delete()\n\n    messages.success(request,\n        'Successfully deleted %s transaction of $%s from %s' % (\n            transaction_type, amount, category\n        )\n    )\n    return HttpResponse()\n\ndef update_transaction(request):\n    transaction_id = request.POST.get('pk')\n    transaction = get_object_or_404(Transaction, pk=transaction_id)\n    field = request.POST.get('name')\n    value = request.POST.get('value')\n\n    if field == 'for_business':\n        if value.strip().lower() in ('yes', 'y'):\n            value = True\n        else:\n            value = False\n\n    setattr(transaction, field, value)\n    try:\n        # transaction.clean()\n        transaction.save()\n        return HttpResponse('Success!')\n    except:\n        response_data = {'status': 'error', 'msg': 'Invalid Entry!'}\n        response_json = json.dumps(response_data)\n        return HttpResponse(response_json, content_type='application/json')\n\ndef get_budget_summary(request, pk):\n    budget = get_object_or_404(Budget, pk=pk)\n    details = budget.get_details()\n    del details['income_transactions']\n    del details['expense_transactions']\n    del details['expense_budget_categories']\n    del details['income_budget_categories']\n    response_json = json.dumps(details)\n    return HttpResponse(response_json, content_type=\"application/json\")\n\nclass OverviewView(TemplateView):\n    template_name = 'overview.html'\n\n    def get_context_data(self, *args, **kwargs):\n        context = super(OverviewView, self).get_context_data(*args, **kwargs)\n        current_year = datetime.datetime.now().year\n        budgets = Budget.objects.filter(\n            year__in=(current_year, current_year-1)\n        )\n        budget_contexts = []\n        categories = Category.objects.all()\n        date_data = []\n        income_data = []\n        expense_data = []\n        net_income_data = []\n        for budget in budgets:\n            total_income = budget.get_total_income()\n            total_expenses = budget.get_total_expenses()\n            budget_contexts.append({\n                'total_income':total_income,\n                'total_expenses':total_expenses,\n                'net_income':total_income - total_expenses,\n                'three_month_net':budget.get_three_month_net_income(),\n                'year_net':budget.get_yearly_net_income(),\n                'start_date':budget.start_date,\n                'id':budget.id\n            })\n            date_data.append(budget.start_date.strftime('%b %y'))\n            income_data.append(total_income)\n            expense_data.append(total_expenses)\n            net_income_data.append(round(total_income - total_expenses, 2))\n        context.update({\n            'budgets': budget_contexts,\n            'categories': categories,\n            'date_data': date_data,\n            'income_data': income_data,\n            'expense_data': expense_data,\n            'net_income_data': net_income_data})\n        return context\n\nclass BusinessExpenseOverview(TemplateView):\n    template_name = 'business_overview.html'\n\n    def get_context_data(self, *args, **kwargs):\n        context = super(BusinessExpenseOverview, self).get_context_data(\n            *args, **kwargs\n        )\n        budgets = Budget.objects.all()\n        budget_contexts = []\n        expense_spent_data = []\n        expense_date_data = []\n        for budget in budgets:\n            business_expenses = 
Transaction.objects.filter(\n transaction_type='expense',\n budget=budget,\n for_business=True\n )\n total = business_expenses.aggregate(Sum('amount'))['amount__sum'] or 0\n budget_contexts.append({\n 'total': business_expenses.aggregate(\n Sum('amount')\n )['amount__sum'] or 0,\n 'id': budget.pk,\n 'date': budget.start_date,\n })\n expense_date_data.append(budget.start_date.strftime('%b %y'))\n expense_spent_data.append(float(total))\n context.update({\n 'business_totals': budget_contexts,\n 'expense_date_data': expense_date_data,\n 'expense_spent_data': expense_spent_data \n })\n return context\n\nclass CategoryOverview(TemplateView):\n template_name = 'category_overview.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super(CategoryOverview, self).get_context_data(*args, **kwargs)\n category_id = kwargs.get('pk')\n if not category_id:\n raise Http404\n category = get_object_or_404(Category, id=category_id)\n\n # get all the expense budget categories and their context\n expense_budget_categories = BudgetCategory.objects.filter(\n category=category, income=False)\n expense_bc_contexts = []\n #expense_left_data = []\n expense_spent_data = []\n expense_date_data = []\n expense_budgeted_data = []\n for bc in expense_budget_categories:\n left = bc.amount_left_in_category()\n spent = bc.amount_spent_in_category()\n expense_bc_contexts.append({\n 'left': left,\n 'spent': spent,\n 'date': bc.budget.start_date,\n 'budgeted': bc.amount,\n 'id': bc.id,\n 'budget_id':bc.budget.id\n })\n expense_date_data.append(bc.budget.start_date.strftime('%b %y'))\n expense_spent_data.append(float(spent))\n #expense_left_data.append(float(left))\n expense_budgeted_data.append(float(bc.amount))\n\n #get the income budget categories and their context\n income_budget_categories = BudgetCategory.objects.filter(\n category=category, income=True)\n income_bc_contexts = []\n #income_left_data = []\n income_earned_data = []\n income_date_data = []\n income_budgeted_data = []\n for bc in income_budget_categories:\n left = bc.amount_left_in_category()\n earned = bc.amount_earned_in_category()\n income_bc_contexts.append({\n 'left': left,\n 'earned': earned,\n 'date': bc.budget.start_date,\n 'budgeted': bc.amount,\n 'id': bc.id,\n 'budget_id':bc.budget.id\n })\n income_date_data.append(bc.budget.start_date.strftime('%b %y'))\n income_earned_data.append(float(earned))\n #income_left_data.append(float(left))\n income_budgeted_data.append(float(bc.amount)) \n\n context.update({\n 'category':category,\n 'expense_budget_categories':expense_bc_contexts,\n 'expense_date_data':expense_date_data,\n 'expense_spent_data':expense_spent_data,\n 'expense_budgeted_data':expense_budgeted_data,\n 'income_budget_categories':income_bc_contexts,\n 'income_date_data':income_date_data,\n 'income_earned_data':income_earned_data,\n 'income_budgeted_data':income_budgeted_data,\n })\n return context\n\ndef get_business_expense_transactions(request):\n pk = request.GET.get('pk')\n if not pk:\n raise Http404\n budget = get_object_or_404(Budget, id=pk)\n transactions = Transaction.objects.filter(\n budget=budget, for_business=True, transaction_type='expense')\n return render(request, 'business_transactions.html',\n {'transactions':transactions, 'budget':budget})\n\ndef get_transactions_for_budget_category(request):\n pk = request.GET.get('pk')\n if not pk:\n raise Http404\n bc = get_object_or_404(BudgetCategory, id=pk)\n transactions = Transaction.objects.filter(\n budget=bc.budget, category=bc.category)\n return render(request, 
'transactions_for_bc.html',\n {'transactions':transactions, 'budget_category':bc})\n\ndef get_transactions_for_budget(request):\n pk = request.GET.get('pk')\n if not pk:\n raise Http404\n budget = get_object_or_404(Budget, id=pk)\n transactions = Transaction.objects.filter(\n budget=budget)\n return render(request, 'transactions_for_budget.html',\n {'transactions':transactions, 'budget':budget})\n\n# Create your views here.\nclass BudgetCategoryFormView(TemplateView):\n template_name = 'edit_budget_category_form.html'\n BudgetCategoryFormSet = formset_factory(\n BudgetCategoryForm, formset=BudgetCategoryFormSet,\n can_delete=True, extra=2)\n\n def dispatch(self, *args, **kwargs):\n self.budget = get_object_or_404(Budget, pk=kwargs.get('pk'))\n return super(BudgetCategoryFormView, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, *args, **kwargs):\n budget_categories = BudgetCategory.objects.filter(\n budget=self.budget)\n\n #build initial data from existing models\n values_list = []\n for bc in budget_categories:\n values_list.append({'budget':bc.budget, 'amount':bc.amount, \n 'category':bc.category, 'income':bc.income})\n\n context = super(BudgetCategoryFormView, self).get_context_data(\n *args, **kwargs)\n category_formset = self.BudgetCategoryFormSet(initial=values_list)\n context.update({'category_formset':category_formset,\n 'budget':self.budget}\n )\n return context\n\n def post(self, request, *args, **kwargs):\n category_formset = self.BudgetCategoryFormSet(request.POST)\n if category_formset.is_valid():\n for form in category_formset:\n if form.has_changed():\n\n form.save(self.budget)\n\n messages.success(request, \n \"You've updated this budget successfully\"\n )\n return HttpResponseRedirect(\n reverse('edit_categories', kwargs={'pk':kwargs.get('pk')})\n )\n return render(request, self.template_name, \n {'category_formset':category_formset, 'budget':self.budget}\n )\n\nclass CreateBudgetFormView(CreateView):\n template_name = 'create_budget_form.html'\n # success_url = '/budgets/' #this should direct to budget categories form view\n model = Budget\n fields = ['month', 'year']\n\n def get_form(self, form_class):\n form = super(CreateBudgetFormView, self).get_form(form_class)\n form.fields['month'].widget.attrs.update({'class':'form-control'})\n form.fields['year'].widget.attrs.update({'class':'form-control'})\n return form\n\n def form_valid(self, form):\n #there must be a better place to put this\n #if there is a pk kwarg, this means that we want to create this budget\n #but clone the categories of the budget matching pk\n budget_to_clone_pk = self.kwargs.get('pk')\n bcs_to_clone = []\n if budget_to_clone_pk:\n budget_to_clone = Budget.objects.get(pk=budget_to_clone_pk)\n bcs_to_clone = BudgetCategory.objects.filter(\n budget=budget_to_clone)\n\n self.object = form.save()\n\n for bc in bcs_to_clone:\n new_bc = BudgetCategory(\n budget=self.object,\n category=bc.category,\n amount=bc.amount,\n income=bc.income\n )\n new_bc.save()\n if budget_to_clone_pk:\n messages.success(\n self.request,\n \"You've successfully cloned the %s\" % budget_to_clone\n )\n else:\n messages.success(self.request, \"New budget created!\")\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n created_budget = self.object\n return reverse('edit_categories', kwargs={'pk':created_budget.pk})\n\nclass CategoryFormView(FormView):\n template_name = 'category_form.html'\n form_class = CategoryForm\n success_url = reverse_lazy('category_types')\n\n def 
form_valid(self, form):\n form.save()\n messages.success(self.request,\n \"You've successfully added a category type!\")\n return super(CategoryFormView, self).form_valid(form)\n\n\nclass RecurringTransactionFormView(FormView):\n template_name = 'recurring_transaction_form.html'\n form_class = RecurringTransactionForm\n success_url = reverse_lazy('recurring_transactions') #really? look into lazyloading reverse\n\n def dispatch(self, request, *args, **kwargs):\n #figure out whether this is creating a new object or \n #editing an existing one\n pk = kwargs.get('pk')\n if pk:\n recurring = RecurringTransactionDef.objects.filter(pk=pk)\n self.initial = recurring.values()[0]\n #values off object give category_id not category so need\n #to manually add in category key into initial.\n self.initial.update({'category':recurring[0].category})\n return super(RecurringTransactionFormView, self).dispatch(\n request, *args, **kwargs\n )\n\n def form_valid(self, form):\n pk = None\n if self.initial:\n pk = self.initial.get('id')\n form.save(pk=pk)\n if pk:\n messages.success(self.request,\n \"You've successfully edited a recurring transaction!\")\n else:\n messages.success(self.request,\n \"You've successfully added a recurring transaction!\")\n return super(RecurringTransactionFormView, self).form_valid(form)\n\n#This formview is somewhat unncessary now since we have the form \n#in the budget detail view. deprecated\n# class TransactionFormView(FormView):\n# template_name = 'transaction_form.html'\n# form_class = TransactionForm\n# success_url = '/transactions/'\n\n# def get_context_data(self, *args, **kwargs):\n# context = super(TransactionFormView, self).get_context_data(\n# *args, **kwargs)\n# today = datetime.date.today()\n# month = today.month\n# year = today.year\n# budget, created = Budget.objects.get_or_create(month=month, year=year)\n\n# context.update({\n# 'summary':budget.get_details()\n# })\n# return context\n\n# def form_valid(self, form): \n# obj = form.save()\n# messages.success(self.request,\n# \"You've successfully added a transaction to %s!\" % obj.budget)\n# return super(TransactionFormView, self).form_valid(form)\n\nclass RecurringTransactionListView(ListView):\n model = RecurringTransactionDef\n template_name = 'recurring_transactions_list.html'\n queryset = RecurringTransactionDef.objects.filter()\n context_object_name = 'recurring_transactions'\n\nclass CategoryListView(ListView):\n model = Category\n template_name = 'category_list.html'\n queryset = Category.objects.all().order_by('name')\n context_object_name = 'category_list'\n\nclass BudgetListView(ListView):\n model = Budget\n template_name = 'budget_list.html'\n queryset = Budget.objects.all().order_by('-year', '-month')\n context_object_name = 'budget_list'\n\n #paginate this later\n # def get_context_data(self, *args, **kwargs):\n # context = super(BudgetListView, self).get_context_data(*args, **kwargs)\n # budget_list = s\n\ndef current_budget(request):\n today = datetime.datetime.today()\n month = today.month\n year = today.year\n\n budget = get_object_or_404(Budget, month=month, year=year)\n return redirect(reverse_lazy('budget', kwargs={'pk':budget.pk}))\n\nclass BudgetView(DetailView):\n model = Budget\n template_name = 'budget_detail.html'\n context_object_name = 'budget'\n\n def get_context_data(self, *args, **kwargs):\n context = super(BudgetView, self).get_context_data(*args, **kwargs)\n budget = self.object\n today = datetime.datetime.today()\n this_month = today.month\n this_year = today.year\n\n #if looking at 
the current month, give today's date otherwise the first\n        #of the month\n\n        #mimic last transaction inputted into given budget\n\n        last_transactions = Transaction.objects.filter(\n            budget=budget, recurring_transaction_def__isnull=True\n        ).order_by('-pk')\n        if last_transactions:\n            last = last_transactions[0]\n            transaction_form = TransactionForm(\n                initial={'date': last.date, 'category': last.category,\n                    'transaction_type': last.transaction_type})\n\n        elif this_month == budget.month and this_year == budget.year:\n            transaction_form = TransactionForm(\n                initial={'date':today.date()})\n        else:\n            transaction_form = TransactionForm(\n                initial={'date':budget.start_date})\n        context.update({\n            'summary': budget.get_details(),\n            'transaction_form': transaction_form\n        })\n        return context\n\n\n    def post(self, request, *args, **kwargs):\n        transaction_form = TransactionForm(request.POST)\n        if transaction_form.is_valid():\n            transaction_form.save()\n            #at this point in the method call chain, self.object does not exist\n            #so use self.get_object() instead\n            messages.success(request,\n                "You've successfully added a transaction to %s!" % \\\n                self.get_object())\n            redirect_to = reverse('budget', args=args, kwargs=kwargs)\n            return HttpResponseRedirect(redirect_to)\n\n        context = self.get_context_data(*args, **kwargs)\n        context.update({'transaction_form': transaction_form})\n        return render(request, self.template_name, context)\n","repo_name":"takeahsiaor/budget","sub_path":"budget/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"31111788980","text":"\"\"\"blog22 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom account.views import CustomAuthToken,user_Register,user_crud,test_auth\nfrom django.contrib import admin\nfrom post.views import get_all_post,add_new_post,my_content,post_rud,make_comment,commentsclass\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    # path('apia/', views.obtain_auth_token),\n    path('login/', CustomAuthToken.as_view()),\n    path('usereg/', user_Register.as_view()),\n    # path('tes/',test_auth),\n    path('user/',user_crud.as_view()),\n    path('home/',get_all_post),\n    path('addpost/',add_new_post),\n    path('mycontent/',my_content),\n    path('mycontent/',post_rud.as_view()),\n    path(\"addcomment/\",make_comment),\n    path(\"comment/\",commentsclass.as_view())\n]\n","repo_name":"badushaebrahim/dockerizeddjango","sub_path":"blog22/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"36490825870","text":"# A natural degree k is given. Randomly generate a list of polynomial coefficients (values from 0 to 100)\n# and write the degree-k polynomial to a file.\n#\n# Example:\n#\n# - k=2 => 2*x² + 4*x + 5 = 0 or x² + 5 = 0 or 10*x² = 0\nimport random\n\n\ndef fill_list_nums(a, b, len):\n    list_nums = []\n    for item in range(len):\n        list_nums.append(random.randint(a, b))\n    return list_nums\n\n\ndef get_first_position_not_zero(list):\n    result = -1\n    for item in range(len(list) - 1, -1, -1):\n        if list[item] != 0:\n            result = item\n            break\n    return result\n\n\ndef isZero(list, pos):\n    result = False\n    if list[pos] == 0:\n        result = True\n    return result\n\n\ndef generate_x(coef, deg):\n    coef = str(abs(coef))\n    deg = str(abs(deg))\n    if coef == '0': return ''\n    # if coef == '1': coef = ''\n    deg_out = 'x^' + deg\n    if deg == '0': deg_out = ''\n    if deg == '1': deg_out = 'x'\n    return coef + deg_out\n\n\n# def get_polynomial(list):\n#     s = ''\n#     j = get_first_position_not_zero(list)\n#     s += generate_x(list[j], len(list) - j)\n#     for k in range(len(list) - j, 0, -1):\n#         x = generate_x(list[k - 1], k - 1)\n#         print(list[k - 1], k - 1, x)\n#         if x == '':\n#             continue\n#         s += ' + ' + x\n#     print(s)\n\ndef get_polynomial(list):\n    s = ''\n    for i in range(len(list)):\n        if i != len(list) - 1 and list[i] != 0 and i != len(list) - 2:\n            s += f'{list[i]}x^{len(list) - i - 1}'\n            if list[i + 1] != 0:\n                s += ' + '\n        elif i == len(list) - 2 and list[i] != 0:\n            s += f'{list[i]}x'\n            if list[i + 1] != 0:\n                s += ' + '\n        elif i == len(list) - 1 and list[i] != 0:\n            s += f'{list[i]} = 0'\n        elif i == len(list) - 1 and list[i] == 0:\n            s += ' = 0'\n    return s\n\n\nmin = 0\nmax = 100\nk = 5\nk = int(input(\"Enter the natural degree of the polynomial k = \"))\nlist = fill_list_nums(min, max + 1, k + 1)\n# list = [0,0,0,0,0,0]\n# list = [2, 6, 7, 8, 1, 0]\n# list = []\npos_not_zero = get_first_position_not_zero(list)\nif len(list) == 0 or pos_not_zero == -1:\n    print('The polynomial cannot be created: len = 0 or pos_not_zero == -1')\n    exit()\npolynomial = get_polynomial(list)\nprint(polynomial)\nwith open('polynomial.txt', 'w') as data:\n    data.write(polynomial)\n","repo_name":"ArtsmanDan/python_base","sub_path":"sem4HW/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"33235974378","text":"from tempfile import TemporaryDirectory\nfrom typing import Literal, Iterable\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom uuid import uuid4\nfrom enum import Enum\nimport io\n\nfrom django.template.loader import get_template\n\nimport pdfkit\n\n\nclass PageSize(Enum):\n    A4 = \"A4\"\n    LP2824_PLUS = \"LP2824_PLUS\"\n\n\ndef _get_page_size_options(page_size: PageSize) -> dict:\n    _page_size = {\n        PageSize.A4: {'page-size': \"A4\"},\n        PageSize.LP2824_PLUS: {\n            'page-width': 58,\n            'page-height': 30,\n        },\n    }\n    return _page_size.get(page_size, {})\n\n\n@dataclass(frozen=True)\nclass PDFBlock:\n    contract_number: str\n    instances: Iterable\n\n\n@dataclass(frozen=True)\nclass PDFContext:\n    items: list[PDFBlock]\n    page_size: PageSize\n\n\ndef create_pdf(context: PDFContext) -> io.BytesIO:\n    template = get_template(\"inventory/download-qr-codes.html\")\n    html = template.render({\"items\": context.items, \"page_size\": context.page_size})\n    buffer = io.BytesIO()\n    options = {\n        **_get_page_size_options(context.page_size),\n        'margin-top': '0',\n        'margin-right': '0',\n        'margin-bottom': '0',\n        'margin-left': '0'\n    }\n    with TemporaryDirectory() as dir_:\n
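        # render the PDF into a temporary file on disk, then copy its bytes into the in-memory buffer\n        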
file_path = Path(dir_, f\"{uuid4()}.pdf\")\n pdfkit.from_string(html, file_path, options=options)\n with open(file_path, \"rb\") as file:\n buffer.write(file.read())\n buffer.seek(0)\n return buffer\n","repo_name":"Demetrous-fd/AGOI","sub_path":"backend/inventory/pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3894624532","text":"from django.contrib import admin\nfrom rvalues.models import RValue\n\n\n# Register your models here.\nclass RValueAdmin(admin.ModelAdmin):\n ''' Admin module for Rvalues model '''\n prepopulated_fields = {'slug': ('material',)}\n list_display = (\n 'material',\n 'r_value',\n 'density',\n 'timestamp',\n 'perm',\n 'absorbtion',\n 'flamespread',\n 'smoke',\n 'toxicity',\n 'agingeffect',\n 'timestamp',\n )\n search_fields = ['material']\n\nadmin.site.register(RValue, RValueAdmin)\n","repo_name":"VincentVetsch/McGlennInspections","sub_path":"McGlennInspections/rvalues/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1079388978","text":"import cv2\n\n# Load the cascade classifier\n#download this here https://raw.githubusercontent.com/opencv/opencv/4.x/data/haarcascades/haarcascade_frontalface_default.xml just go there and ctrl + s\nface_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\") \n\n# Read the input image\nimg = cv2.imread(\"image.jpeg\")\n\n# Convert into grayscale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# Detect faces\nfaces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)\n\n# Draw rectangle around the faces\nfor (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)\n\n# Save the output image\ncv2.imwrite(\"output.jpg\", img)","repo_name":"Pandeyashish17/Face-detection-using-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11556487055","text":"from joulescope import scan_require_one\nfrom joulescope.data_recorder import DataRecorder\nimport sys\nimport signal\nimport time\n\n\ndef run():\n quit_ = False\n \n def do_quit(*args, **kwargs):\n nonlocal quit_\n quit_ = 'quit from SIGINT'\n\n def on_stop(event, message):\n nonlocal quit_\n quit_ = 'quit from stop duration' \n\n if len(sys.argv) != 2:\n print(\"usage: python3 capture_jls_v1.py [filename]\")\n return 1\n filename = sys.argv[1]\n signal.signal(signal.SIGINT, do_quit)\n device = scan_require_one(config='auto')\n with device:\n recorder = DataRecorder(filename,\n calibration=device.calibration)\n try:\n device.stream_process_register(recorder)\n data = device.start(stop_fn=on_stop)\n print('Capturing data: type CTRL-C to stop')\n while not quit_:\n time.sleep(0.01)\n finally:\n recorder.close()\n device.stream_process_unregister(recorder)\n return 0\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"jetperch/pyjoulescope_examples","sub_path":"bin/capture_jls_v1.py","file_name":"capture_jls_v1.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"18639687370","text":"import os\nimport sys\nimport cv2\nimport pickle\nimport time\n\nimport numpy as np\nfrom arithmetic_coding import encoder, freqTable, 
bitOutStream\n\nclass arithmetic_compress:\n def __init__(self, image_path, output_path):\n # image path\n self.image_path = image_path\n # compressed path\n self.output_path = output_path\n # string output path (string before changed to bytearray)\n self.string_path = \"encoded_string\"\n\n # image\n self.image = None\n # 1d array create from image\n self.array = None\n\n # number of bits on image\n self.numbits_input = 0\n # number of bits on compression\n self.numbits_output = 0\n\n # compress ratio: equal bitsinput / bitsoutput\n self.ratio = 1.0\n \n # dictionary frequencies make from array\n self.freq = {}\n \n # time using for compressing\n self.time = time.time()\n\n self.read()\n self.toarray()\n\n # read image from image_path and compute bits input\n def read(self):\n try:\n self.image = cv2.imread(self.image_path)\n w, h, c = self.image.shape\n self.numbits_input = w * h * c * 8\n except:\n raise Exception(\"Image Invalid\")\n \n # convert image into 1d-array\n def toarray(self):\n YCrCb = cv2.cvtColor(self.image, cv2.COLOR_BGR2YCrCb)\n \n Y, Cr, Cb = YCrCb[:, :, 0], YCrCb[:, :, 1], YCrCb[:, :, 2]\n \n w, h, _c = self.image.shape\n Y = Y.reshape((1, w*h))[0]\n Cr = Cr.reshape((1, w*h))[0]\n Cb = Cb.reshape((1, w*h))[0]\n\n self.array = list(np.concatenate((Y, Cr, Cb), axis = 0))\n\n def compress(self, numbits):\n \n # push eof into array\n self.array.append(256)\n\n # set up frequencies table\n freq = freqTable()\n freq.set_array(self.array)\n self.freq = freq.freq_dict\n\n # set up bit string output\n bitout = bitOutStream(self.string_path)\n\n model = encoder(numbits = numbits, bitout = bitout)\n \n # Encode each elements in array image\n # and save it into string_path\n leng_array = len(self.array)\n t = leng_array * 1.0 / 100\n percent = 0\n for i,elem in enumerate(self.array):\n if i * 100.0/ leng_array > percent:\n percent += 1\n sys.stdout.write(\"Processing:\\t{} %\\r\".format(percent))\n sys.stdout.flush()\n model.encode(freq, elem)\n model.finish()\n print('')\n \n self.get_total_bitout()\n \n def get_total_bitout(self):\n # bit use for dictionary\n self.numbits_output = 256 * 8 * 2\n\n # bit use for encoded file\n with open(self.string_path, 'r') as f:\n encoded = f.read()\n self.numbits_output += len(encoded)\n \n self.ratio = self.numbits_input * 1.0 / self.numbits_output\n \n # write to bytes output\n def write(self):\n # Save the w, h (shape of image) into bitstring\n # It has structure: length_shape (8bits) + value (length_shape bits)\n shape_str = \"\"\n w, h, _ = self.image.shape\n wstring = '{:b}'.format(w)\n hstring = '{:b}'.format(h)\n wlength = '{:05b}'.format(len(wstring))\n hlength = '{:05b}'.format(len(hstring))\n shape_str = wlength + wstring + hlength + hstring\n\n # pop the eof out of dictionary\n if 256 in self.freq:\n self.freq.pop(256)\n\n dict_str = \"\"\n # The first is length of dictionary\n dict_str += '{:09b}'.format(len(self.freq))\n\n # Dictionary is save as \"key\" + \"length_frequency\" + \"frequency\"\n # \"key\" : 8 bits\n # \"length_frequency\" : 5 bits\n # \"frequency\" : length_frequency bits\n for key, frequency in self.freq.items(): \n str_frequency = '{:b}'.format(frequency)\n length_frequency = '{:05b}'.format(len(str_frequency))\n dict_str += '{:08b}'.format(key) + length_frequency + str_frequency\n \n # read encoded array\n with open(self.string_path, 'r') as f:\n encoded_array = f.read()\n \n # combine shapestr, dictstr and encoded array\n encoded_text = shape_str + dict_str + encoded_array\n \n # add some zero into 
encoded_text to make sure they can save as byte array\n leng = len(encoded_text)\n num_zero = 8 - leng % 8\n\n encoded_text += '{:08b}'.format(num_zero)\n encoded_text = '0' * num_zero + encoded_text\n # extract every 8 bits and save them as an item into bytearray\n b = bytearray()\n for i in range(0, leng + num_zero + 8, 8):\n b.append(int(encoded_text[i: i + 8], 2))\n\n # save byte array to output\n with open(self.output_path, 'wb') as f:\n pickle.dump(b, f)\n self.time = time.time() - self.time\n print(\"Input: \\'%s\\'\\tOutput: \\'%s\\'\\tBit Input: %d\\tBit Output: %d\\tRatio: %.2f\\tTime: %.2f\" %(self.image_path, self.output_path,self.numbits_input, self.numbits_output, self.ratio, self.time))\n\n\ndef main(argv):\n\n images, output = argv\n \n compressor = arithmetic_compress(images, output)\n compressor.compress(numbits = 32)\n compressor.write()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"Ardian-thedarkk/image-compression","sub_path":"arithmetic-coding/arithmetic_compress.py","file_name":"arithmetic_compress.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38218282086","text":"\"\"\"\nfile: xia_filters.py\nbrief:\nauthor: S. V. Paulauskas\ndate: December 14, 2020\n\"\"\"\nfrom math import exp\nfrom statistics import mean\n\nfrom dsp_toolkit.filtering.filters import calculate_trapezoidal_filter\n\n\n# TODO: Add the esums to the output!\n\ndef calculate_baseline(data, trigger, length):\n offset = trigger - length - 5\n if offset < 0:\n raise ValueError(\"First trigger happened too early to calculate the baseline!\")\n return mean(data[:offset])\n\n\ndef calculate_energy_filter_coefficients(length, decay_constant):\n if decay_constant == 0:\n raise ValueError(\"Decay constant must be non-zero!\")\n\n beta = exp(-1.0 / decay_constant)\n\n if beta == TypeError:\n raise beta\n\n cg = 1 - beta\n ctmp = 1 - pow(beta, length)\n\n return {'beta': beta, \"rise\": -(cg / ctmp) * pow(beta, length), \"gap\": cg, \"fall\": cg / ctmp}\n\n\ndef calculate_energy_filter_limits(trigger_position, length, gap, data_length):\n min_limit = trigger_position - length - 10\n if min_limit < 0:\n raise ValueError(\"Trigger happened too early in the trace to calculate the energy!\")\n if trigger_position + length + gap > data_length:\n raise ValueError(\"Trigger happened too late in the trace to calculate the energy!\")\n\n return {\"rise\": (min_limit, min_limit + length - 1),\n \"gap\": (min_limit + length, min_limit + length + gap - 1),\n \"fall\": (min_limit + length + gap, min_limit + 2 * length + gap - 1)}\n\n\ndef calculate_energy(data, baseline, coefficients, limits):\n data_without_baseline = [x - baseline for x in data]\n sum_rise = sum(data_without_baseline[limits['rise'][0]: limits['rise'][1]])\n sum_gap = sum(data_without_baseline[limits['gap'][0]: limits['gap'][1]])\n sum_fall = sum(data_without_baseline[limits['fall'][0]: limits['fall'][1]])\n\n return coefficients['rise'] * sum_rise + coefficients['gap'] * sum_gap + coefficients[\n 'fall'] * sum_fall, {'rising_sum': sum_rise, 'gap_sum': sum_gap, 'falling_sum': sum_fall}\n\n\ndef calculate_energy_filter(data, length, gap, baseline, coefficients):\n if not data:\n raise ValueError(\"Data length cannot be less than 0\")\n\n offset = 2 * length + gap - 1\n\n if len(data) < offset:\n raise ValueError(f\"The data length({len(data)}) is too small for the requested filter \"\n f\"size ({offset})!\")\n\n data_no_baseline 
= [x - baseline for x in data]\n response = [0] * len(data)\n for x in range(offset, len(data_no_baseline)):\n esumL = sum(data_no_baseline[x - offset:x - offset + length])\n esumG = sum(data_no_baseline[x - offset + length: x - offset + length + gap])\n esumF = sum(data_no_baseline[x - offset + length + gap: x - offset + 2 * length + gap])\n response[x] = coefficients['rise'] * esumL + coefficients['gap'] * esumG + coefficients[\n 'fall'] * esumF\n\n for x in range(0, offset):\n response[x] = response[offset]\n\n return response\n\n\ndef calculate_trigger_filter(data, length, gap, threshold):\n if not data:\n raise ValueError(\"Cannot calculate a filter without some data!\")\n\n has_recrossed = False\n triggers = list()\n trigger_filter = calculate_trapezoidal_filter(data, length, gap)\n\n for val in trigger_filter:\n if val >= threshold:\n if not triggers:\n triggers.append(trigger_filter.index(val))\n if has_recrossed:\n triggers.append(trigger_filter.index(val))\n has_recrossed = False\n else:\n if triggers:\n has_recrossed = True\n\n if not triggers:\n raise ValueError(\"No triggers found in the provided data!\")\n\n return triggers, trigger_filter\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n from dsp_toolkit.sample_data import sample_traces as st\n\n signal = st.plastic_scintillator\n\n trig_params = {\"l\": 15, \"g\": 5, \"t\": 10}\n triggers, trigger_filter = calculate_trigger_filter(signal, trig_params['l'],\n trig_params['g'], trig_params['t'])\n baseline = calculate_baseline(signal, triggers[0], trig_params['l'])\n\n energy_params = {\"l\": 10, \"g\": 5, \"t\": 2.5}\n energy_filter_coefficients = calculate_energy_filter_coefficients(energy_params['l'],\n energy_params['t'])\n energy, energy_sums = calculate_energy(signal, baseline, energy_filter_coefficients,\n calculate_energy_filter_limits(triggers[0],\n energy_params['l'],\n energy_params['g'],\n len(signal)))\n energy_filter = calculate_energy_filter(signal, energy_params['l'],\n energy_params['g'], baseline,\n energy_filter_coefficients)\n\n plt.plot(signal, label=\"Data\")\n plt.plot(trigger_filter, label=\"Trigger Filter\")\n plt.plot(energy_filter, label=\"Energy Filter\")\n for trigger in triggers:\n plt.axvline(x=trigger, color='purple', label=\"Trigger\")\n plt.hlines(y=baseline, xmin=triggers[0] - 10, xmax=len(signal), colors='red',\n label=\"Baseline\")\n plt.legend()\n plt.gca().set(xlabel=\"Bin\", ylabel='Adc Units / Bin',\n title=f\"Calculated energy: {round(energy, 2)}\")\n plt.xlim([round(0.75 * triggers[0]), len(signal)])\n plt.show()\n","repo_name":"spaulaus/dsp_toolkit","sub_path":"dsp_toolkit/filtering/xia_filters.py","file_name":"xia_filters.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25310387121","text":"from django.urls import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.contrib import admin\nfrom .models import (\n Lesson, \n Student,\n Teacher,\n Methodist,\n Group,\n LessonType,\n ClassRoom,\n Activity\n)\n\ndef object_url(obj):\n url = reverse(\n f'admin:{obj._meta.app_label}'\n f'_{obj._meta.object_name.lower()}_change',\n kwargs={'object_id':obj.pk}\n )\n return mark_safe(\n f'{obj}'\n )\n\n\nclass UserRelatedAdmin(admin.ModelAdmin):\n def full_name(self, person):\n return str(person.user)\n\n def user_full_name(self, person):\n return object_url(person.user)\n\n\n@admin.register(Group)\nclass GroupAdmin(admin.ModelAdmin):\n list_display = (\n 
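        # columns shown on the Django admin changelist page\n        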
'pk',\n 'name',\n )\n\n\n@admin.register(Student)\nclass StudentAdmin(UserRelatedAdmin):\n list_display = (\n 'pk',\n 'full_name',\n 'group_name',\n 'user_full_name',\n )\n\n def group_name(self, student):\n group = student.group\n\n url = reverse(\n f'admin:{group._meta.app_label}'\n f'_{group._meta.object_name.lower()}_change',\n kwargs={'object_id':group.pk}\n )\n return mark_safe(\n f'{group.name}'\n )\n\n\n@admin.register(Teacher)\nclass TeacherAdmin(UserRelatedAdmin):\n list_display = (\n 'pk',\n 'full_name',\n 'work_days',\n 'user_full_name',\n)\n\n\n@admin.register(Methodist)\nclass MethodistAdmin(UserRelatedAdmin):\n list_display = (\n 'pk',\n 'full_name',\n 'user_full_name',\n)\n\n\n@admin.register(ClassRoom)\nclass ClassRoomAdmin(UserRelatedAdmin):\n list_display = (\n 'pk',\n 'number',\n)\n\n\n@admin.register(LessonType)\nclass LessonTypeAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'name',\n)\n\n\n@admin.register(Lesson)\nclass LessonAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'lesson_type',\n 'groups_list',\n 'teacher_link',\n 'classroom_link',\n 'day',\n 'start_time',\n 'duration',\n 'subgroup',\n )\n search_fields = (\n 'teacher',\n 'classroom',\n 'lesson_type',\n 'start_time',\n 'duration'\n )\n list_filter = ('start_time','classroom',)\n empty_value_display = '-отсутствует-'\n\n\n def teacher_link(self, obj):\n return object_url(obj.teacher)\n\n def classroom_link(self, obj):\n return object_url(obj.classroom)\n\n def groups_list(self, obj):\n return mark_safe(\"\\n\".join([object_url(g) for g in obj.groups.all()]))\n\n\n@admin.register(Activity)\nclass ActivityAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'name',\n 'describe',\n 'groups_list',\n 'teacher_link',\n 'classroom_link',\n 'day',\n 'start_time',\n 'duration',\n )\n search_fields = (\n 'name',\n 'describe',\n 'teacher',\n 'classroom',\n 'day',\n 'start_time',\n 'duration',\n )\n list_filter = ('start_time','classroom',)\n empty_value_display = '-отсутствует-'\n\n def teacher_link(self, obj):\n teacher = obj.teacher\n if teacher:\n return object_url(teacher)\n\n def classroom_link(self, obj):\n classroom = obj.classroom\n if classroom:\n return object_url(classroom)\n\n def groups_list(self, obj):\n return mark_safe(\"\\n\".join([object_url(g) for g in obj.groups.all()]))\n","repo_name":"Alexey-zaliznuak/TimeTableManagment","sub_path":"TimeTableManagment/timetables/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5735135983","text":"import ast\nfrom _ast import FunctionDef, AsyncFunctionDef, ClassDef, Expr, AST\nfrom enum import Enum\nfrom typing import Tuple, List, Any, Dict, Optional\n\n\nclass ScopeType(Enum):\n Function = 0\n Class = 1\n\n\nclass ScopeItem:\n def __init__(self,\n node_range: Tuple[int, int],\n node_ast: ast.AST,\n node_type: ScopeType):\n self.node_range = node_range\n self.node_ast = node_ast\n self.node_type = node_type\n\n def get_range(self):\n return self.node_range\n\n def get_ast(self):\n return self.node_ast\n\n def get_type(self):\n return self.node_type\n\n def get_range_len(self):\n return self.node_range[1] - self.node_range[0]\n\n def contains(self, other) -> bool:\n return (self.get_range_len() > other.get_range_len() and\n self.node_range[0] <= other.get_range()[0] and\n self.node_range[1] >= other.get_range()[1])\n\n def get_lines(self) -> List[int]:\n return list(range(self.node_range[0], self.node_range[1] + 1))\n\n 
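    # two ScopeItems are equal only when they share the same line range, scope type, and AST node\n    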
def __eq__(self, other):\n return (self.node_range[0] == other.get_range()[0] and\n self.node_range[1] == other.get_range()[1] and\n self.node_type == other.get_type() and\n self.node_ast == other.get_ast())\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\nclass AddModeManager:\n\n def __init__(self,\n buggy_content: str,\n fixed_content: str,\n start_add_line_num: int,\n end_add_line_num: int,\n fixed_to_buggy_map: Dict[int, int]):\n self.__buggy_content_lines = buggy_content.splitlines()\n self.__fixed_content_lines = fixed_content.splitlines()\n self.__fixed_start_add_line_num = start_add_line_num\n self.__fixed_end_add_line_num = end_add_line_num\n self.__fixed_to_buggy_map = fixed_to_buggy_map\n self.__buggy_content_ast = ast.parse(buggy_content)\n self.__fixed_content_ast = ast.parse(fixed_content)\n\n def get_add_mode_ground_truth(self) -> Tuple[int, int]:\n before_start_add_line = -1\n after_end_add_line = -1\n\n # Finding the scope of the starting and ending added lines in fixed version.\n start_add_line_fixed_scope = self.get_sorted_scope_line_numbers(self.__fixed_content_ast,\n self.__fixed_content_lines,\n self.__fixed_start_add_line_num)\n end_add_line_fixed_scope = self.get_sorted_scope_line_numbers(self.__fixed_content_ast,\n self.__fixed_content_lines,\n self.__fixed_end_add_line_num)\n\n # Getting the scope of start add line in the fixed version.\n before_start_add_line_fixed_scope = [x for x in start_add_line_fixed_scope\n if x < self.__fixed_start_add_line_num]\n\n # Getting the scope of end add line in the fixed version.\n after_end_add_line_fixed_scope = [x for x in end_add_line_fixed_scope\n if x > self.__fixed_end_add_line_num]\n\n # If the added code is a whole scope (e.g., whole function)\n if (\n # self.is_same_list(start_add_line_fixed_scope, end_add_line_fixed_scope) and\n len(before_start_add_line_fixed_scope) == 0 and\n len(after_end_add_line_fixed_scope) == 0\n ):\n scope_lines = start_add_line_fixed_scope\n whole_scope_parent = WholeScopeParent(self.__fixed_content_ast, self.__fixed_content_lines, scope_lines)\n parent_scope_lines = whole_scope_parent.get_parent_scope_lines()\n\n # Getting the scope of start add line in the fixed version.\n before_start_add_line_fixed_scope = [x for x in parent_scope_lines\n if x < self.__fixed_start_add_line_num]\n\n # Getting the scope of end add line in the fixed version.\n after_end_add_line_fixed_scope = [x for x in parent_scope_lines\n if x > self.__fixed_end_add_line_num]\n\n if len(before_start_add_line_fixed_scope) != 0:\n # Finding a line before the starting added line\n # in the fixed version, within its scope and\n # mapping it to the buggy version.\n tmp = before_start_add_line_fixed_scope.copy()\n tmp.reverse()\n buggy_one_line_before_start_add_line_num = -1\n for item in tmp:\n if item in self.__fixed_to_buggy_map.keys():\n if self.__fixed_content_lines[item - 1] != \"\":\n buggy_one_line_before_start_add_line_num = self.__fixed_to_buggy_map[item]\n break\n\n if buggy_one_line_before_start_add_line_num == -1:\n before_start_add_line = -1\n else:\n # Finding the scope of the before line in the buggy version.\n none_empty_one_line_before_start_add_line_buggy_scope = self.get_sorted_scope_line_numbers(\n self.__buggy_content_ast,\n self.__buggy_content_lines,\n buggy_one_line_before_start_add_line_num)\n\n # Getting the scope before start add line in the buggy version.\n before_start_add_line_buggy_scope = [x for x in none_empty_one_line_before_start_add_line_buggy_scope if\n x <= 
buggy_one_line_before_start_add_line_num]\n\n # Find a localizable line before start add line in the buggy version.\n tmp = before_start_add_line_buggy_scope.copy()\n tmp.reverse()\n before_start_add_line_selected = self.select_localizable_line_number(tmp)\n before_start_add_line = before_start_add_line_selected\n\n if len(after_end_add_line_fixed_scope) != 0:\n # Finding a line after the ending added line\n # in the fixed version, within its scope and\n # mapping it to the buggy version.\n buggy_one_line_after_end_add_line_num = -1\n for item in after_end_add_line_fixed_scope:\n if item in self.__fixed_to_buggy_map.keys():\n if self.__fixed_content_lines[item - 1] != \"\":\n buggy_one_line_after_end_add_line_num = self.__fixed_to_buggy_map[item]\n break\n\n if buggy_one_line_after_end_add_line_num == -1:\n after_end_add_line = -1\n else:\n # Finding the scope of the after line in the buggy version.\n one_line_after_end_add_line_buggy_scope = self.get_sorted_scope_line_numbers(self.__buggy_content_ast,\n self.__buggy_content_lines,\n buggy_one_line_after_end_add_line_num)\n\n # Getting the scope after end add line in the buggy version.\n after_end_add_line_buggy_scope = list(\n filter(lambda x: x >= buggy_one_line_after_end_add_line_num,\n one_line_after_end_add_line_buggy_scope))\n\n # Find a localizable line after end add line in the buggy version.\n after_end_add_line_selected = self.select_localizable_line_number(after_end_add_line_buggy_scope)\n after_end_add_line = after_end_add_line_selected\n\n return before_start_add_line, after_end_add_line\n\n def select_localizable_line_number(self, line_numbers: List) -> int:\n executable_line_object = ExecutableLine(self.__buggy_content_lines, self.__buggy_content_ast)\n for line in line_numbers:\n if executable_line_object.is_executable(line):\n return line\n return -1\n\n @staticmethod\n def get_high_level_none_decl_lines(ast_node: ast.AST,\n start_line_num: int,\n end_line_num: int) -> List[int]:\n\n line_numbers = list(range(start_line_num, end_line_num + 1))\n high_level_none_decl_visitor = HighLevelNoneDeclVisitor(ast_node, line_numbers)\n high_level_none_decl_visitor.visit(ast_node)\n high_level_none_decl_line_numbers = high_level_none_decl_visitor.get_line_numbers()\n\n return high_level_none_decl_line_numbers\n\n @classmethod\n def get_sorted_scope_line_numbers(cls, file_ast_tree: ast.AST, file_lines: List[str], line_number: int) -> List[\n int]:\n scope_finder_visitor = ScopeFinderVisitor(line_number)\n scope_finder_visitor.visit(file_ast_tree)\n scopes = scope_finder_visitor.get_scopes()\n if len(scopes) != 0:\n min_scope = cls.min_scope(scopes)\n if min_scope.node_type == ScopeType.Function:\n function_scope_lines = list(range(min_scope.node_range[0], min_scope.node_range[1] + 1))\n scope_line_numbers = function_scope_lines\n elif min_scope.node_type == ScopeType.Class:\n high_level_class_scope_lines = AddModeManager.get_high_level_none_decl_lines(min_scope.node_ast,\n min_scope.node_range[0],\n min_scope.node_range[1])\n scope_line_numbers = high_level_class_scope_lines\n else:\n raise Exception(\"This must not happen.\")\n else:\n global_scope_lines = AddModeManager.get_high_level_none_decl_lines(file_ast_tree,\n 1,\n len(file_lines))\n scope_line_numbers = global_scope_lines\n\n scope_line_numbers.sort()\n\n return scope_line_numbers\n\n @staticmethod\n def arg_max(items):\n return items.index(max(items))\n\n @staticmethod\n def arg_min(items):\n return items.index(min(items))\n\n @classmethod\n def min_scope(cls, scopes: 
List[ScopeItem]) -> ScopeItem:\n for item_x in scopes:\n for item_y in scopes:\n assert item_x == item_y or item_x.get_range_len() != item_y.get_range_len()\n if item_x.get_range_len() > item_y.get_range_len():\n assert item_x.contains(item_y)\n\n scope_len_list = [x.get_range()[1] - x.get_range()[0] for x in scopes]\n\n scope_len_min_index = cls.arg_min(scope_len_list)\n min_scope = scopes[scope_len_min_index]\n\n return min_scope\n\n @staticmethod\n def is_same_list(list_a: List, list_b: List) -> bool:\n if len(list_a) != len(list_b):\n return False\n\n for item_a in list_a:\n if item_a not in list_b:\n return False\n\n return True\n\n\ndef get_function_class_ast_node_start_end_lines(node):\n start_line_num = node.lineno\n end_line_num = node.end_lineno\n for item in node.decorator_list:\n start_line_num = min(start_line_num, item.lineno)\n\n return start_line_num, end_line_num\n\n\nclass ScopeFinderVisitor(ast.NodeVisitor):\n def __init__(self,\n target_line_number: int):\n self.__target_line_number = target_line_number\n self.__scopes = []\n\n def visit_FunctionDef(self, node: FunctionDef) -> Any:\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n if start_line_num <= self.__target_line_number <= end_line_num:\n scope_item = ScopeItem((start_line_num, end_line_num),\n node,\n ScopeType.Function)\n self.__scopes.append(scope_item)\n self.generic_visit(node)\n\n def visit_AsyncFunctionDef(self, node: AsyncFunctionDef) -> Any:\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n if start_line_num <= self.__target_line_number <= end_line_num:\n scope_item = ScopeItem((start_line_num, end_line_num),\n node,\n ScopeType.Function)\n self.__scopes.append(scope_item)\n self.generic_visit(node)\n\n def visit_ClassDef(self, node: ClassDef) -> Any:\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n if start_line_num <= self.__target_line_number <= end_line_num:\n scope_item = ScopeItem((start_line_num, end_line_num),\n node,\n ScopeType.Class)\n self.__scopes.append(scope_item)\n self.generic_visit(node)\n\n def get_scopes(self):\n return self.__scopes\n\n\nclass HighLevelNoneDeclVisitor(ast.NodeVisitor):\n def __init__(self,\n ast_node: ast.AST,\n line_numbers: List[int]):\n self.ast_node = ast_node\n self.line_numbers = line_numbers.copy()\n\n def visit_FunctionDef(self, node: FunctionDef) -> Any:\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n if node != self.ast_node:\n self._remove_lines_in_range(start_line_num, end_line_num)\n self.generic_visit(node)\n\n def visit_AsyncFunctionDef(self, node: AsyncFunctionDef) -> Any:\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n if node != self.ast_node:\n self._remove_lines_in_range(start_line_num, end_line_num)\n self.generic_visit(node)\n\n def visit_ClassDef(self, node: ClassDef) -> Any:\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n if node != self.ast_node:\n self._remove_lines_in_range(start_line_num, end_line_num)\n self.generic_visit(node)\n\n def _remove_lines_in_range(self,\n start_line_num: int,\n end_line_num: int):\n for line_number in range(start_line_num, end_line_num + 1):\n if line_number in self.line_numbers:\n self.line_numbers.remove(line_number)\n\n def get_line_numbers(self):\n return self.line_numbers\n\n\nclass ExecutableLine:\n def __init__(self,\n file_lines: List[str],\n file_ast: ast.AST):\n self.file_lines = 
file_lines\n self.file_ast = file_ast\n\n def is_executable(self, line):\n return (not self.is_comment(line) and\n not self.is_empty(line) and\n # not self.is_none_node(line) and\n not self.is_decl(line) and\n not self.is_docstring(line) and\n not self.is_decorator(line) and\n not self.is_only_brackets(line) and\n not self.is_else(line) and\n not self.is_finally(line))\n\n def is_comment(self, line):\n return self.file_lines[line - 1].strip().startswith(\"#\")\n\n def is_empty(self, line):\n return self.file_lines[line - 1].strip() == \"\"\n\n def is_decl(self, line):\n return (self.file_lines[line - 1].strip().startswith(\"def\") or\n self.file_lines[line - 1].strip().startswith(\"class\") or\n self.file_lines[line - 1].strip().startswith(\"async def\"))\n\n def is_docstring(self, line):\n line_text = self.file_lines[line - 1]\n if (line_text.startswith(\"'''\") or\n line_text.startswith('\"\"\"') or\n line_text.endswith(\"'''\") or\n line_text.endswith('\"\"\"')):\n return True\n\n docstring_visitor = DocstringVisitor(line)\n docstring_visitor.visit(self.file_ast)\n temp = docstring_visitor.is_docstring()\n return temp\n\n def is_decorator(self, line):\n return self.file_lines[line - 1].strip().startswith(\"@\")\n\n def is_only_brackets(self, line: int):\n line_text = self.file_lines[line - 1].strip()\n bracket_count = 0\n for c in line_text:\n if c in [\",\", \"[\", \"(\", \"{\", \"}\", \")\", \"]\"]:\n bracket_count += 1\n\n return len(line_text) == bracket_count\n\n def is_else(self, line):\n return self.file_lines[line - 1].strip().startswith(\"else\")\n\n def is_finally(self, line):\n return self.file_lines[line - 1].strip().startswith(\"finally\")\n\n\nclass DocstringVisitor(ast.NodeVisitor):\n def __init__(self,\n line: int):\n self.__line = line\n self.__is_string = False\n\n def visit_Expr(self, node: Expr) -> Any:\n if node.lineno <= self.__line <= node.end_lineno:\n if (hasattr(node, \"value\") and\n isinstance(node.value, ast.Constant) and\n hasattr(node.value, \"value\")):\n self.__is_string = True\n self.generic_visit(node)\n\n def is_docstring(self):\n return self.__is_string\n\n\nclass FunctionVisitor(ast.NodeVisitor):\n def __init__(self,\n lines: List[int]):\n self._lines = lines\n self._function_names = []\n self._function_nodes = []\n\n def visit_FunctionDef(self, node: FunctionDef) -> Any:\n if self._is_in_lines(node):\n function_name = self._get_function_name(node)\n self._function_names.append(function_name)\n self._function_nodes.append(node)\n self.generic_visit(node)\n\n def visit_AsyncFunctionDef(self, node: AsyncFunctionDef) -> Any:\n if self._is_in_lines(node):\n function_name = self._get_function_name(node)\n self._function_names.append(function_name)\n self._function_nodes.append(node)\n self.generic_visit(node)\n\n def get_function_names(self) -> List[str]:\n return self._function_names\n\n def get_function_nodes(self) -> List:\n return self._function_nodes\n\n def _is_in_lines(self, node) -> bool:\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n for line in self._lines:\n if start_line_num <= line <= end_line_num:\n return True\n return False\n\n @staticmethod\n def _get_function_name(node):\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n return f\"{node.name}::{start_line_num}::{end_line_num}\"\n\n\ndef get_functions_for_lines(module_content: str,\n lines: List[int]) -> List[str]:\n tree = ast.parse(module_content)\n function_visitor = FunctionVisitor(lines)\n 
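    # walk the parsed module tree; the visitor records every def that overlaps the requested lines\n    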
function_visitor.visit(tree)\n functions = function_visitor.get_function_names()\n return functions\n\n\nclass WholeScopeParent:\n def __init__(self,\n content_ast: ast.AST,\n content_lines: List[str],\n scope_lines: List[int]):\n self._content_ast = content_ast\n self._content_lines = content_lines\n self._scope_lines = scope_lines\n\n def get_parent_scope_lines(self) -> List[int]:\n min_scope = self._get_min_scope()\n parent_scope_lines = self._get_parent_scope_lines(min_scope)\n return parent_scope_lines\n\n def _get_min_scope(self) -> ScopeItem:\n scope_list = []\n for line_item in self._scope_lines:\n scope_finder_visitor = ScopeFinderVisitor(line_item)\n scope_finder_visitor.visit(self._content_ast)\n scopes = scope_finder_visitor.get_scopes()\n for scope in scopes:\n if scope not in scope_list:\n scope_list.append(scope)\n\n min_scope = min(scope_list, key=lambda x: x.get_range_len())\n\n for scope_item in scope_list:\n assert scope_item == min_scope or scope_item.contains(min_scope)\n\n return min_scope\n\n def _get_parent_scope_lines(self, scope: ScopeItem) -> List[int]:\n scope_lines = scope.get_lines()\n parent_visitor = ParentVisitor(scope_lines, scope.get_ast())\n parent_visitor.visit(self._content_ast)\n min_parent_ast = parent_visitor.get_min_parent_ast()\n\n if min_parent_ast is None:\n line_numbers = list(range(1, len(self._content_lines) + 1))\n high_level_none_decl_visitor = HighLevelNoneDeclVisitor(self._content_ast, line_numbers)\n high_level_none_decl_visitor.visit(self._content_ast)\n parent_scope_lines = high_level_none_decl_visitor.get_line_numbers()\n else:\n line_numbers = self._get_node_lines(min_parent_ast)\n high_level_none_decl_visitor = HighLevelNoneDeclVisitor(min_parent_ast, line_numbers)\n high_level_none_decl_visitor.visit(self._content_ast)\n parent_scope_lines = high_level_none_decl_visitor.get_line_numbers()\n\n return parent_scope_lines\n\n @staticmethod\n def _get_node_lines(node) -> List[int]:\n start_line_num = node.lineno\n end_line_num = node.end_lineno\n\n if (isinstance(node, ast.FunctionDef) or\n isinstance(node, ast.AsyncFunctionDef) or\n isinstance(node, ast.ClassDef)):\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n\n node_lines = list(range(start_line_num, end_line_num + 1))\n\n return node_lines\n\n\nclass ParentVisitor(ast.NodeVisitor):\n def __init__(self,\n scope_lines: List[int],\n scope_ast: ast.AST):\n self._lines = scope_lines\n self._scope_ast = scope_ast\n self._parent_nodes = []\n\n def visit(self, node: AST) -> Any:\n if self._contains_all_lines(node):\n self._parent_nodes.append(node)\n self.generic_visit(node)\n\n def _contains_all_lines(self, node) -> bool:\n if not hasattr(node, \"lineno\") or not hasattr(node, \"end_lineno\"):\n return False\n\n if node == self._scope_ast:\n return False\n\n start_line_num = node.lineno\n end_line_num = node.end_lineno\n\n if (isinstance(node, ast.FunctionDef) or\n isinstance(node, ast.AsyncFunctionDef) or\n isinstance(node, ast.ClassDef)):\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n\n for line in self._lines:\n if not (start_line_num <= line <= end_line_num):\n return False\n return True\n\n def get_min_parent_ast(self) -> Optional[ast.AST]:\n if len(self._parent_nodes) == 0:\n return None\n else:\n min_parent = min(self._parent_nodes, key=lambda x: self._node_size(x))\n return min_parent\n\n @staticmethod\n def _node_size(node) -> int:\n start_line_num = node.lineno\n end_line_num = node.end_lineno\n\n if 
(isinstance(node, ast.FunctionDef) or\n isinstance(node, ast.AsyncFunctionDef) or\n isinstance(node, ast.ClassDef)):\n start_line_num, end_line_num = get_function_class_ast_node_start_end_lines(node)\n\n node_size = end_line_num - start_line_num + 1\n\n return node_size\n","repo_name":"mohrez86/fauxpy-experiments","sub_path":"first_round_selection/ast_manager.py","file_name":"ast_manager.py","file_ext":"py","file_size_in_byte":23487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73263027464","text":"import unittest\nimport mock\nfrom usquam.src.session_interaction_handler import SessionInteractionHandler\n\nclass TestSessionInteraction(unittest.TestCase):\n\n def setUp(self):\n SessionInteractionHandler.handlers = {'Answer': mock.MagicMock(return_value=\"foo\"), \n 'CancelTask': mock.MagicMock(return_value=\"bar\")}\n\n @mock.patch('usquam.src.session_interaction_handler.IntentParser')\n def test_handleInput(self, mock_intent):\n mock_intent.parse.return_value = {'intent_type': 'Answer'}\n result = SessionInteractionHandler.handleInput(None, \"hello\")\n \n mock_intent.parse.assert_called_with(\"hello\", [\"Answer\", \"CancelTask\"])\n self.assertEquals(result, 'foo')\n SessionInteractionHandler.handlers['Answer'].assert_called_with(None, \"hello\")","repo_name":"Ekula/uSquam_backend","sub_path":"test/unit/src/test_session_interaction_handler.py","file_name":"test_session_interaction_handler.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31310663456","text":"\ndef get_smallest_numerical_value(phone_numbers):\n map_num_value_phone_nb={}\n smallest_num_value=10**10\n for phone_number in phone_numbers:\n num_value=int(phone_number.replace('-', ''))\n map_num_value_phone_nb[num_value]=phone_number\n if num_value5 mins: 150 cents every started min:5:00 = 5 x 150, 5:01 = 6 x 150\n elif duration_s==0:\n cost = 150 * duration_mins\n else:\n cost = 150 * (duration_mins+1)\n if phone_nb in phone_cost:\n phone_cost[phone_nb]+=cost\n phone_duration[phone_nb]+=duration_secs\n else:\n phone_cost[phone_nb]=cost\n phone_duration[phone_nb]=duration_secs\n\n # Now get Max:\n total_cost = 0\n good_friends_duration=-1\n for phone_nb in phone_cost:\n total_cost+=phone_cost[phone_nb]\n if phone_duration[phone_nb]>good_friends_duration:\n good_friends_duration=phone_duration[phone_nb]\n # good_friend_cost=phone_cost[phone_nb]\n\n # Get all friends with the same duration\n # good_friend_cost=-1\n good_friends_numbers=[]\n for phone_nb in phone_duration:\n if phone_duration[phone_nb]==good_friends_duration:\n good_friends_numbers.append(phone_nb)\n\n # Longest total duration = free\n # Tie...\n free_number=get_smallest_numerical_value(good_friends_numbers)\n\n return total_cost-phone_cost[free_number]\n\n_str= \"00:01:07,400-234-090\\n00:05:01,701-080-080\\n00:05:00,400-234-090\"\n# _str= \"00:01:07,400-234-090\\n00:01:07,701-080-080\"\n\nnbs= [\"400-234-090\", \"701-080-080\"]\nprint(get_smallest_numerical_value(nbs))\nprint(solution( _str))\n","repo_name":"bonnemai/MyPortfolio","sub_path":"iress3.py","file_name":"iress3.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15804900899","text":"import sys\nimport csv\nname=sys.argv[1]\nfrom pyspark import SparkContext, SparkConf\ndef splitter(line):\n for column in 
csv.reader([line],delimiter=','):\n        return column[6]\ndef sumfunc(x,y):\n    return x+y\nconf = SparkConf().setAppName('Kia_bigdata_lab').setMaster('local')\nsc = SparkContext(conf=conf)\ntrees=sc.textFile(name).map(splitter).filter(lambda x:x)\nheader=trees.first()\ntrees1=trees.filter(lambda x:x!=header).map(lambda x:(x,1))\nnumber=trees1.reduceByKey(lambda x,y:x+y).sortByKey().collect()\nfor it in range(len(number)):\n    print(number[it][0]+ ': '+str(number[it][1]))","repo_name":"kiababashahi/Data-frames-and-RDDS","sub_path":"answers/uniq_parks_counts_rdd.py","file_name":"uniq_parks_counts_rdd.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11932761391","text":"import requests\nfrom operator import itemgetter\n\n\nurl = \"https://hacker-news.firebaseio.com/v0/topstories.json\"\nr = requests.get(url)\n\n\nsubmission_ids = r.json()\nsubmission_dicts = []\n\nfor submission_id in submission_ids[:10]:\n    # doing another call for each one\n\n    url = (f'https://hacker-news.firebaseio.com/v0/item/{submission_id}.json')\n    submission_r = requests.get(url)\n    response_dict = submission_r.json()\n\n\n    submission_dict = {\n        'title': response_dict['title'],\n        'link': f\"http://news.ycombinator.com/item?id={submission_id}\",\n        'comments': response_dict.get('descendants', 0)\n    }\n\n    submission_dicts.append(submission_dict)\n\nsubmission_dicts = sorted(submission_dicts, key=itemgetter('comments'),reverse=True)\n\nfor each in submission_dicts:\n    print('#' * 80, \"\\n\")\n    print(f\"Title: {each['title']}\")\n    print(f\"Link: {each['link']}\")\n    print(f\"Comments: {each['comments']}\\n\")\n","repo_name":"ChrystianRubio/Api-Hacker-News","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"18466289965","text":"import torch\n\n# **Mini exercise**\n# Visit the [docs](http://pytorch.org/docs/0.3.0/tensors.html) to learn more of the tensor API, then implement the following:\n# create a float32, 4 x 4 matrix of all ones, and set the central 2 x 2 block of the matrix to 2\n\n# Answer\nx = torch.ones(4, 4).float()\nx[1:3, 1:3] = 2\nprint(x)\n","repo_name":"chenmeilong/SRai","sub_path":"pytorch/1.基础知识/6.tensor操作练习.py","file_name":"6.tensor操作练习.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"73218608265","text":"\"\"\" Algorithms for checking if an integer is a palindrome. \"\"\"\n\nimport math\nfrom reverse_digits import digit_reverse_math\n\n\ndef palindrome(x):\n    \"\"\" Checks if int x is a palindrome.\n    Args:\n        x: int\n    Returns:\n        True if x is a palindrome else False.\n    \"\"\"\n    # If x < 0, x cannot be a palindrome\n    if x <= 0:\n        return x == 0\n    # Get number of digits using built-in library math\n    num_digits = math.floor(math.log10(x)) + 1\n    # msd_mask used to extract most significant digit\n    msd_mask = 10**(num_digits - 1)\n    # Check if digits at each end match for each possibility\n    for i in range(num_digits // 2):\n        if x // msd_mask != x % 10:\n            return False\n        # Remove most significant digit\n        x %= msd_mask\n        # Remove least significant digit\n        x //= 10\n        # x is now two digits smaller,\n        # so msd_mask must also be two digits smaller\n        msd_mask //= 100\n    return True\n\n\ndef palindrome_rev(x):\n    \"\"\" Checks if int x is a palindrome.\n    Args:\n        x: int\n    Returns:\n        True if x is a palindrome else False.\n    \"\"\"\n    # If x < 0, x cannot be a palindrome\n    if x <= 0:\n        return x == 0\n    # Simply check if x equals its reverse\n    y = digit_reverse_math(x)\n    if x == y:\n        return True\n    return False\n\n\nprint(palindrome(123454321))\nprint(palindrome_rev(123454321))\n","repo_name":"arcaputo3/algorithms","sub_path":"EPI/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"85261819","text":"# When a problem is given in circular form, consider a way to unroll it into a straight line.\r\nfrom itertools import permutations\r\n\r\ndef solution(n, weak, dist):\r\n    answer = len(dist)+1\r\n    weak_point = len(weak)  # number of initial weak points\r\n\r\n    for i in range(len(weak)):  # unroll the circle into a straight line\r\n        weak.append(weak[i] + n)\r\n\r\n    for start in range(weak_point):  # every case of starting from each point (0th weak point through the last)\r\n        # in the flattened list it is enough to cover as many points as the initial weak count\r\n        for fr in list(permutations(dist,len(dist))):  # every possible order of dispatching the friends\r\n            cnt = 1  # count of dispatched friends\r\n            pos = weak[start] + fr[cnt-1]  # distance coverable from the start by friend fr[index]\r\n            for idx in range(start,start+weak_point):  # from the starting weak point to the last one (count = initial len(weak))\r\n                if pos < weak[idx]:  # the current friend cannot reach the remaining weak point\r\n                    cnt+=1  # dispatch one more friend\r\n                    if cnt > len(dist):  # cannot exceed the initial number of friends\r\n                        break\r\n                    pos = weak[idx]+fr[cnt-1]  # compute the dispatched friend's position and repeat\r\n            answer = min(answer,cnt)\r\n\r\n    print(answer)\r\n    if answer > len(dist):  # if the minimum exceeds the available friends, not every point can be visited\r\n        return -1\r\n    return answer\r\n\r\nn=200\r\nweak=[0, 10, 50, 80, 120, 160]\r\ndist=[1, 10, 5, 40, 30]\r\nsolution(n,weak,dist)","repo_name":"Areum0921/Abox","sub_path":"This is a coding test with python/page 335.py","file_name":"page 335.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"72094832264","text":"from application import utils\n\n\ndef parse_request(request_data, ip_port):\n    \"\"\"Parse the request message and return the client's resource path\"\"\"\n    request_text = request_data.decode()\n    loc = request_text.find(\"\\r\\n\")\n    request_line = request_text[:loc]\n    request_line_list = request_line.split(\" \")\n    file_path = request_line_list[1]\n    print(\"[%s] is requesting: %s\" % (str(ip_port), file_path))\n\n    # set the default home page\n    if file_path == \"/\":\n        file_path = \"/index.html\"\n\n    return file_path\n\n\ndef application(current_dir, request_data, ip_port):\n    # call the parse_request function to parse the request protocol and return the requested resource path\n    file_path = parse_request(request_data, ip_port)\n\n    # build the path of the requested resource\n    resource_path = current_dir + file_path\n\n    try:\n        # read the file via with open\n        with open(resource_path, \"rb\") as file:\n            # return the file content read here to the client\n            response_body = file.read()\n\n        # call the utils module's creat_http_response function to assemble the response protocol\n        response_data = utils.creat_http_response(\"200 OK\", response_body)\n    except Exception as e:\n\n        # 2) the response content is the error\n        response_body = \"Error! (%s)\" % str(e)\n        # 3) convert the content to bytes\n        response_body = response_body.encode()\n\n        response_data = utils.creat_http_response(\"404 Not Found!\", response_body)\n\n    return response_data\n","repo_name":"cess-100/python-study2","sub_path":"day05/application/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"}
{"seq_id":"25532260919","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport requests\n\ndef spainData():\n    df = pd.read_csv('https://raw.githubusercontent.com/andrzejmp/some_codes/main/python/tourism/data.csv')\n\n    years = df[\"YEAR\"]\n    country = df[\" SP\"]\n\n    plt.style.use('seaborn')\n    fig, ax = plt.subplots()\n    ax.bar(years, country)\n    ax.set_title(\"Data on tourism in Spain by year\", fontsize=24)\n    ax.set_xlabel('Years', fontsize=16)\n    ax.set_ylabel(\"Number of people\", fontsize=16)\n    ax.tick_params(axis='both', which='major', labelsize=16)\n    plt.show()\n\n\nspainData()\n","repo_name":"jesusma3009/JaroslawProjects","sub_path":"python/Tourism Python/tourism_spain.py","file_name":"tourism_spain.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"5945155124","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 7 17:42:15 2022\r\n\r\n@author: R\r\n\"\"\"\r\n\r\nimport csv\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\ndriver = webdriver.Chrome(ChromeDriverManager().install())\r\n\r\ndef get_url(search,i):\r\n    #temp='https://www.amazon.in/s?k={}&crid=1YJSNNU54AGDR&sprefix=laptop%2Caps%2C451&ref=nb_sb_noss_1'\r\n    temp='https://www.amazon.in/s?k={}&page={}&qid=1651923885&ref=sr_pg_2'\r\n    search=search.replace(\" \", \"+\")\r\n    return temp.format(search,i)\r\n\r\ndef extract(item,pages):\r\n    record=[]\r\n    for i in range(1,pages+1):\r\n        url=get_url(item,i)\r\n        print(\"\")\r\n        print(url)\r\n
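        # load the results page in the Selenium-driven browser; its rendered source is parsed below\r\n        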
driver.get(url)\r\n #https://www.amazon.in/s?k=laptops&page=2&qid=1651923885&ref=sr_pg_2\r\n laptop_container=BeautifulSoup(driver.page_source,'html.parser')\r\n result=laptop_container.find_all('div',{'data-component-type':'s-search-result'})\r\n total_item=len(result)\r\n \r\n for j in range(total_item):\r\n url2=\"http://www.amazon.in\"+result[j].h2.a.get('href')\r\n driver.get(url2)\r\n laptop=BeautifulSoup(driver.page_source,'html.parser')\r\n #print(laptop)\r\n print(j+1,end=\" \")\r\n ###\r\n name=laptop.find('span',class_='a-size-large product-title-word-break')\r\n product=name.text\r\n ASIN=laptop.find(id='productDetails_detailBullets_sections1').text.strip()\r\n ASIN_no=ASIN[7:17]\r\n p_name=product.split(',')[0].strip()\r\n try:\r\n was_price=laptop.find('span',class_='a-price a-text-price a-size-base').text\r\n pp=len(was_price)\r\n was_price=was_price[:pp//2][1:]\r\n now_price=laptop.find('span', class_=\"a-offscreen\").text[1:]\r\n except:\r\n was_price='N/A'\r\n now_price='N/A'\r\n \r\n try:\r\n review_count=laptop.find(id='acrCustomerReviewText').text\r\n rating=laptop.find('span', class_='a-size-medium a-color-base').text\r\n except AttributeError:\r\n review_count='N/A'\r\n rating='N/A'\r\n ########\r\n desc=laptop.find(id='productDetails_techSpec_section_1').text\r\n \r\n \r\n rec=(p_name,rating,review_count,was_price,now_price,desc,ASIN_no,url2)\r\n record.append(rec)\r\n \r\n with open(f'{item}.csv','w',newline=\"\",encoding='utf-8') as f:\r\n writer=csv.writer(f)\r\n writer.writerow(['Product Name', 'Star rating', 'Review Rating','Was Price','Current price','Product Description', 'ASIN Number',' product URL'])\r\n writer.writerows(record)\r\n\r\n\r\nif __name__==\"__main__\":\r\n item=input('Search: ')\r\n pages=int(input(\"How Many pages?: \"))\r\n extract(item,pages)\r\n driver.close()\r\n \r\n","repo_name":"DeAdak/WebScraping","sub_path":"WebScrapingHackathon.py","file_name":"WebScrapingHackathon.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20434659901","text":"# -*- coding: utf-8 -*-\n\"\"\"Tools : database of spectra, line survey, interface with Cantera.\n\"\"\"\n\n\nfrom .database import SpecDatabase, load_spec, plot_spec, save\nfrom .gascomp import get_eq_mole_fraction\nfrom .slit import (\n convolve_with_slit,\n crop_slit,\n get_effective_FWHM,\n get_FWHM,\n plot_slit,\n recenter_slit,\n)\n\n__all__ = [\n \"SpecDatabase\",\n \"load_spec\",\n \"plot_spec\",\n \"save\",\n \"get_eq_mole_fraction\",\n \"plot_slit\",\n \"get_effective_FWHM\",\n \"get_FWHM\",\n]\n","repo_name":"radis/radis","sub_path":"radis/tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"81"} +{"seq_id":"31253885457","text":"\n### This script will allow me to run networks with different parameters on the fly,\n### facilitating quick experiments.\n\nimport argparse\nimport sys\n\n\n\nclass ArgParser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write('ERROR: %s\\n\\n' % message)\n self.print_help()\n sys.exit(2)\n\n\nif __name__ == '__main__':\n\n parser = ArgParser(description='Trains a traffic sign classifier', add_help=False)\n dummy = parser.add_argument('-n', '--network', type=str, help='Name of the network to run', default='LeNet')\n dummy = parser.add_argument('-l', '--learning_rate', type=float, help='Learning rate to train with', 
default=0.001)\n dummy = parser.add_argument('-e', '--epochs', type=int, help='Number of epochs to train', default=20)\n dummy = parser.add_argument('-k', '--keep_prob', type=float, help='Keep probability when using dropout', default=1.0)\n dummy = parser.add_argument('-t', '--training_set', type=str, help='Training set to use', default='../data/traffic-signs-data/train.p')\n dummy = parser.add_argument('-p', '--preprocess', type=str, help='Preprocess data with this function', default='')\n\n args = parser.parse_args()\n\n from Traffic_Sign_Classifier import *\n\n if not args.network in dir():\n print(\"Network '{}' NOT FOUND\".format(args.network))\n sys.exit(-1)\n\n if args.preprocess and not args.preprocess in dir():\n print(\"Preprocess function '{}' NOT FOUND\".format(args.preprocess))\n sys.exit(-1)\n\n X_train, y_train = read_data_file(args.training_set)\n X_valid, y_valid = read_data_file(validation_file)\n\n image_shape = X_train[0].shape\n\n x = tf.placeholder(tf.float32, (None,) + image_shape)\n y = tf.placeholder(tf.int32, (None))\n\n keep_prob = tf.placeholder(tf.float32)\n\n placeholders = (x, y, keep_prob)\n\n # build model identifier\n model_id_parts = [args.network]\n training_id = args.training_set.split('/')[-1].split('.')[0].replace('train_', '').replace('train', '')\n\n if training_id:\n model_id_parts.append(training_id)\n\n if args.preprocess:\n model_id_parts.append(args.preprocess)\n X_train = globals()[args.preprocess](X_train)\n X_valid = globals()[args.preprocess](X_valid)\n\n model_id_parts.append('e{}'.format(args.epochs))\n model_id_parts.append('l{}'.format(str(args.learning_rate).replace('.', '_')))\n\n keep_prob_val = max(min(args.keep_prob, 1.0), 0.01)\n\n data = (X_train, y_train, X_valid, y_valid)\n\n if 'dropout' in args.network:\n model_id_parts.append('k{}'.format(str(keep_prob_val).replace('.', '_')))\n train_network(globals()[args.network](x, keep_prob), '_'.join(model_id_parts), data, placeholders, rate=args.learning_rate, epochs=args.epochs, keep_prob_val=keep_prob_val)\n else:\n train_network(globals()[args.network](x), '_'.join(model_id_parts), data, placeholders, rate=args.learning_rate, epochs=args.epochs)\n\n","repo_name":"yonomitt/traffic-sign-classifier","sub_path":"project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13297479181","text":"\"\"\"\nUtility function for the OSG demo\n\"\"\"\n\nimport numpy as np\nimport os\nfrom misshapen import nonshape\n\n \ndef save_Ps_and_Ts(data_filename, Fs = 1000, f_range = (6,12)):\n \"\"\"\n Saves the indices corresponding to oscillatory peaks and trough into a new numpy file\n \"\"\"\n \n # Load data\n x = np.load(data_filename)\n \n # Calculate peaks and troughs\n Ps, Ts = nonshape.findpt(x, f_range, Fs = Fs)\n\n # Save peaks and troughs\n save_dict = {'Ps':Ps, 'Ts':Ts}\n for key in save_dict.keys():\n filename_save = './out/'+key+'_'+os.path.basename(data_filename)\n np.save(filename_save, save_dict[key])\n \n","repo_name":"srcole/demo_OSG_python","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"4663921402","text":"#This is a script that downloads the .csv file which contains the vulnerabilities from the official exploit db repo in github and searches the vuls in it\n\nimport csv,re,os,subprocess\nimport 
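The vulnerability checker that begins above goes on to substring-match dpkg package names plus truncated versions against rows of the exploit-db CSV. A minimal sketch of that matching loop, assuming files_exploits.csv exists locally with the description in column index 2 (as the script itself assumes); the package and version lists here are hypothetical stand-ins for the parsed dpkg output:

import csv
import re

# Hypothetical inputs standing in for the dpkg output the script parses.
packages = ['openssl', 'sudo']
versions = ['1.1.1f-1ubuntu2', '1.8.31-1ubuntu1']

with open('files_exploits.csv') as f:  # assumed to exist locally
    rows = list(csv.reader(f))

for name, version in zip(packages, versions):
    # Keep only the leading characters of the version and strip trailing
    # punctuation, mirroring the j[:3] plus re.sub step in the original.
    short = re.sub(r'([^\w\s]|_)+(?=\s|$)', '', version[:3])
    needle = f'{name} {short}'
    for row in rows:
        if needle in row[2]:  # column 2 holds the exploit description
            print('possible match:', row[2])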
datetime\nimport logging\n\npathtospiders = os.path.abspath(os.getcwd()) + \"/web_scraping/spiders\"\npathtorapid7 = os.path.abspath(os.getcwd()) + \"/web_scraping/spiders/rapid7.csv\"\npathtoexploits = os.path.abspath(os.getcwd()) + \"/web_scraping/spiders/files_exploits.csv\"\npathtolog = os.path.abspath(os.getcwd()) + \"/web_scraping/spiders/agent.log\"\n\nsubprocess.call(\"wget https://raw.githubusercontent.com/offensive-security/exploitdb/master/files_exploits.csv\", shell=True, cwd=pathtospiders)\npackages = os.popen(\"dpkg -l | grep '^ii' | awk '{print $2}'\").read().split('\\n') #get the packages' name using dpkg\nversions = os.popen(\"dpkg -l | grep '^ii' | awk '{print $3}'\").read().split('\\n') #get the packages' version\nkernel_version= os.popen('uname -r').read() #get the linux kernel version\nkernel_version=('linux kernel' + ' '+kernel_version[:6]) #keep only the version of linux kernel\n\ntry:\n \n for i,j in zip(packages,versions):\n j=j[:3]\n \n results=(i+\" \"+(re.sub(r'([^\\w\\s]|_)+(?=\\s|$)', '', j))) #package + version\n \n with open(pathtoexploits) as f_obj:\n reader = csv.reader(f_obj, delimiter=',')\n \n for line in reader: #Iterates through the rows files_exploits.csv\n\n if kernel_version in line[2]: #search for the linux kernel vulnerability into the.csv file\n logging.basicConfig(filename=pathtolog, filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n logging.warning('There is a vulnerability in the current kernel version')\n\n if (results in line[2]) and (results != ' '): #If the string you want to search is in the row\n logging.basicConfig(filename=pathtolog, filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n \n logging.warning('Description:' + ' ' + line[2])\n #logging.warning('ID:' + ' ' + line[0])\n with open(pathtorapid7) as f_obj:\n reader = csv.reader(f_obj, delimiter=',')\n \n for line in reader: #Iterates through the rows of your csv\n\n if kernel_version in line[0]: #search for the linux kernel vulnerability into the.csv file\n logging.basicConfig(filename=pathtolog, filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n logging.warning('There is a vulnerability in the current kernel version')\n\n if results in line[0] and ('Huawei' not in line[0]) and ('OS X' not in line[0]) and ('IBM' not in line[0]) and ('Sun' not in line[0]) and ('ELSA' not in line[0]) and ('Apple' not in line[0]) and ('Oracle' not in line[0]) and ('Gentoo' not in line[0]) and (('Amazon' not in line[0]) and ('Alpine' not in line[0]) and ('RHSA' not in line[0]) and ('AIX' not in line[0]) and ('Moodle' not in line[0]) and ('USN' not in line[0]) and ('DSA' not in line[0]) and ('HP' not in line[0])): \n\n logging.basicConfig(filename=pathtolog, filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n \n \n logging.warning('Description:' + ' ' + line[0])\n\nexcept KeyboardInterrupt:\n pass\n\n\n\t\n\n \n","repo_name":"nikosf95/Vulnerability_Checker","sub_path":"linux_version/web_scraping/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4364346946","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 19 16:19:04 2019\nTWITTER SENTIMENT ANALYSIS\n@author: rajat\n\nRecommender system\n\n1.INstall dependencies\n2.Write script\n\"\"\"\nimport numpy as np\nfrom lightfm.datasets import fetch_movielens\nfrom lightfm import LightFM\n\n# fetch data and format it\ndata = 
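The recommender that begins above ranks every item for a user by predicting one score per item and sorting descending. That ranking step reduces to a plain numpy argsort; a sketch with hypothetical scores standing in for model.predict(userid, np.arange(n_items)):

import numpy as np

# Stand-in for model.predict(userid, np.arange(n_items)): one score per item.
scores = np.array([0.1, 0.9, -0.3, 0.4])
item_labels = np.array(['Heat', 'Alien', 'Clue', 'Big'])

# argsort(-scores) orders items from highest to lowest predicted score.
top_items = item_labels[np.argsort(-scores)]
print(top_items[:3])  # ['Alien' 'Big' 'Heat']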
fetch_movielens(min_rating=4.2)\n\n# printing train and testing data\nprint('Printing training Data:')\nprint(repr(data['train']))\n\nprint('Testing Data:')\nprint(repr(data['test']))\n\n# create model\n\nmodel = LightFM(learning_rate=0.02,loss='warp')\n \n# train model\nmodel.fit(data['train'], epochs=30, num_threads=5)\n\ndef sample_recommendation(model, data, user_ids):\n # number of users nd movies\n n_users, n_items = data['train'].shape\n \n # generate recommmendation for each user\n for userid in user_ids:\n # movies they already like\n known_positives = data['item_labels'][data['train'].tocsr()[userid].indices]\n \n # movies our model predict they will like\n scores = model.predict(userid, np.arange(n_items))\n \n top_items = data['item_labels'][np.argsort(-scores)]\n \n print(\"User %s\"% userid)\n print(\" Known positives:\")\n for x in known_positives[:3]:\n print(\" %s\"%x)\n print(\" Recommendations:\")\n for x in top_items[:3]:\n print(\" %s\"%x) \n \nsample_recommendation(model, data,[3,15,42]) ","repo_name":"rajat81/Machine_Learning","sub_path":"Recommendation_system/movie_pred.py","file_name":"movie_pred.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74398577225","text":"import math\nimport numpy as np\nimport pandas as pd\nfrom collections import OrderedDict,defaultdict\n\nEPS=1e-8\nrui=lambda u:(lambda:float(np.random.randint(u[0],u[1])))\nruf=lambda u:(lambda:float(np.random.uniform(u[0],u[1])))\nPRO_STATE_NAMES=['er', 'econs', 'rcons', 'B', 'p', 'g', 'twe', 'ler', 'w', 'alpha','PF','Aq', 'x', 'y', 'vx','vy']\n\ndef fpro_config(dic):\n config={}\n i=['er','econs','rcons','B','p','g']\n f=['F','Q','twe','ler']\n for item in i:\n config[item]=ruf(dic[item]) #change\n for item in f:\n config[item]=ruf(dic[item])\n config['w']=float(dic['w'])\n config['alpha']=float(dic['alpha'])\n config['x']=dic['x']\n config['y']=dic['y']\n return config\n\ndef ftask_config(dic):\n config={}\n f=['rz','ez']\n for item in f:\n config[item]=ruf(dic[item])\n return config\n\ndef fjob_config(dic):\n config={}\n f=['time','womiga','sigma']\n for item in f:\n config[item]=ruf(dic[item])\n config['num']=lambda:int(np.random.randint(dic['num'][0],dic['num'][1]+1))\n return config\n\ndef floc_config():\n def generate(num_pros,maxnum_tasks):\n num_pro_choices=np.random.randint(1,num_pros+1,maxnum_tasks)\n loc=np.zeros((num_pros,maxnum_tasks),dtype='float')\n loc[:]=EPS\n for i in range(maxnum_tasks):\n num_pro_choice=num_pro_choices[i]\n pro_choice=np.random.choice(np.arange(num_pros,dtype='int'),num_pro_choice,False)\n loc[pro_choice,i]=1\n return loc\n return generate\n\nclass PROCESSOR:\n def __init__(self,config:dict):\n '''F,Q,er,econs,rcons,B,p,g,d,w,alpha,twe,ler'''\n self.pro_dic=OrderedDict()\n for k in config:\n if callable(config[k]) and not k=='Q':\n self.pro_dic[k]=config[k]()\n else:\n self.pro_dic[k]=config[k]\n self.Exe=0\n self.UExe=0\n self.cal_PF()\n self.sum_Aq=0\n self.Nk=0\n self.cal_Aq()\n self.t=0\n \n def cal_PF(self):\n self.PF=(self.Exe+1)/(self.Exe+self.UExe+2)\n \n def cal_Aq(self):\n self.Aq=(self.sum_Aq+1)/(self.Nk+2)\n \n def cal_squard_d(self,t):\n self.d=self.pro_dic['x'](t)**2+self.pro_dic['y'](t)**2\n return self.d\n \n def cal_v(self,t,tp):\n return self.pro_dic['x'](t)-self.pro_dic['x'](tp),self.pro_dic['y'](t)-self.pro_dic['y'](tp)\n \n def __call__(self,tin:float,task:dict,sigma:float):\n te=[x/self.pro_dic['er'] for x in task['ez']]\n tp=tin-self.t\n 
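cal_PF above is additive (Laplace) smoothing of a success rate: the +1/+2 pseudo-counts give 0.5 under no evidence and wash out as observations accumulate. A quick standalone illustration:

def smoothed_rate(successes, failures):
    # (k + 1) / (n + 2): Laplace's rule of succession. Returns 0.5 with no
    # data and approaches the raw frequency as counts grow.
    return (successes + 1) / (successes + failures + 2)

print(smoothed_rate(0, 0))   # 0.5, instead of an undefined 0/0
print(smoothed_rate(98, 2))  # ~0.97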
self.t=tin\n twe=self.pro_dic['twe']\n ler=self.pro_dic['ler']\n ler=min(max(ler+twe-tp,0),ler)\n twe=max(twe-tp,0)\n Q,finish=0,True\n for i in range(len(te)):\n if np.random.rand()>self.pro_dic['F']:\n self.UExe+=1\n finish=False\n else:\n Q+=self.pro_dic['Q']()\n self.Nk+=1\n self.Exe+=1\n twe+=te[i]\n twr=max(ler-te[i],0)\n t=tin+twe+twr\n tr=self.cal_tr(task['rz'][i],t)\n ler=twr+tr\n self.pro_dic['twe']=twe\n self.pro_dic['ler']=ler\n self.cal_PF()\n Q*=sigma\n self.sum_Aq+=Q\n self.cal_Aq()\n if twe+ler==0:\n print('t_here!')\n return Q,twe+ler,np.sum(te)*self.pro_dic['econs']+np.sum(tr)*self.pro_dic['rcons'],finish\n \n def cal_tr(self,rz,t):\n r=self.pro_dic['B']*np.log2(\n 1+self.pro_dic['p']*self.pro_dic['g']/\n (self.cal_squard_d(t)**(self.pro_dic['alpha']/2)\n *self.pro_dic['w']**2))\n return rz/r\n\nclass PROCESSORS:\n def __init__(self,pro_configs:list):\n self.num_pros=len(pro_configs)\n self.pros=[PROCESSOR(pro_config) for pro_config in pro_configs]\n \n def __call__(self,tin:float,tasks:dict,action:np.ndarray,womiga:float,sigma:float):\n for i,rz in enumerate(tasks['rz']):\n break\n num_tasks=i if not rz else i+1\n tasks['ez']=tasks['ez'][:num_tasks]\n tasks['rz']=tasks['rz'][:num_tasks]\n act_list=[(i,action[0][i],action[1][i]) for i in range(num_tasks)]\n act_list=sorted(act_list,key=lambda x:x[-1])\n Q,task_time,cons,finish=0,0,0,True\n for i,pro in enumerate(self.pros):\n task={}\n task['ez'],task['rz']=[],[]\n for item in act_list:\n if item[1]==i:\n task['ez'].append(tasks['ez'][item[0]])\n task['rz'].append(tasks['rz'][item[0]])\n if len(task['ez']):\n Q1,task_time1,cons1,finish1=pro(tin,task,sigma)\n if not finish1:\n finish=finish1\n Q+=Q1\n task_time=max(task_time,task_time1)\n cons+=cons1\n if task_time==0:\n print('ta_here!')\n dic={}\n dic['Q'],dic['T'],dic['C'],dic['F']=Q,task_time*womiga,cons,finish\n return dic\n\nclass JOB:\n def __init__(self,maxnum_tasks:int,task_configs:list,job_config:dict):\n self.maxnum_tasks=maxnum_tasks\n self.task_configs=task_configs\n self.job_config=job_config\n self.job_index=0\n self.tin=0\n\n def __call__(self):\n self.job_index+=1\n num_tasks=self.job_config['num']()\n tasks=defaultdict(list)\n for i,config in enumerate(self.task_configs):\n if i=self.maxnum_episode:\n self.done=1\n print(str(self.name)+' done')\n if self.reset_step:\n l=['er','econs','rcons','B','p','g']\n for pro,pro_conf in zip(self.processors.pros,self.pro_configs):\n for key in l:\n pro.pro_dic[key]=pro_conf[key]()\n return self.send(),reward,self.done,self.over,None\n\nclass RANDOM_AGENT:\n def __init__(self,maxnum_tasks):\n self.maxnum_tasks=maxnum_tasks\n \n def take_action(self,state):\n action=np.zeros((2,self.maxnum_tasks),dtype='int')\n action[1]=np.random.permutation(np.arange(self.maxnum_tasks))\n sub_loc=state[0][0,0,:,-self.maxnum_tasks:]\n num_pros=sub_loc.shape[0]\n for j,col in enumerate(sub_loc.T):\n action[0][j]=np.random.choice(np.arange(num_pros),p=col/col.sum())\n return action\n\nclass OTHER_AGENT:\n def __init__(self,choice,maxnum_tasks):\n self.choice=choice\n self.maxnum_tasks=maxnum_tasks\n \n def take_action(self,state):\n action=np.zeros((2,self.maxnum_tasks),dtype='int')\n action[1]=np.random.permutation(np.arange(self.maxnum_tasks))\n sub_loc=state[0][0,0,:,-self.maxnum_tasks:]\n pro_status=state[0][0,0,:,:-self.maxnum_tasks]\n action[0]=self.choice(sub_loc,pro_status)\n return action\n\ndef random_choice(sub_loc,_):\n num_pros=sub_loc.shape[0]\n return [np.random.choice(np.arange(num_pros),p=col/col.sum()) for col in 
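cal_tr above derives a transfer time from a Shannon-style channel rate, r = B * log2(1 + p*g / (d**alpha * w**2)), where cal_squard_d supplies the squared distance (so raising it to alpha/2 yields d**alpha). A standalone sketch with made-up parameter values in the same ranges the config dictionaries use:

import numpy as np

def transfer_time(rz, B, p, g, d_squared, alpha, w):
    # Shannon-style capacity: rate = bandwidth * log2(1 + SNR). Received
    # power decays as distance**alpha; w**2 plays the role of the noise floor.
    r = B * np.log2(1 + p * g / (d_squared ** (alpha / 2) * w ** 2))
    return rz / r  # time to move rz units of data at rate r

print(transfer_time(rz=15.0, B=12.0, p=15.0, g=12.0, d_squared=400.0, alpha=2, w=1.0))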
sub_loc.T]\n\ndef short_twe_choice(sub_loc,pro_status):\n num_pros=sub_loc.shape[0]\n num_tasks=sub_loc.shape[1]\n l_pros=[(i,pro_status[i,PRO_STATE_NAMES.index('twe')]+pro_status[i,PRO_STATE_NAMES.index('ler')]) for i in range(num_pros)]\n l_pros.sort(key=lambda x:x[1])\n l_pros_index=[x[0] for x in l_pros]\n act=[-1 for _ in range(num_tasks)]\n task_visited=[0 for _ in range(num_tasks)]\n i=j=k=count=0\n while countnum_tasks-count:\n i=(i+1)%num_pros\n k=0\n return act\n \nif __name__=='__main__':\n '''F,Q,er,econs,rcons,B,p,g,d,w,alpha,twe,ler'''\n #np.random.seed(1)\n np.set_printoptions(2)\n pro_dic={}\n pro_dic['F']=(0.9,0.99)\n pro_dic['Q']=(0.7,1)\n pro_dic['er']=(10,20)\n pro_dic['econs']=(1,5)\n pro_dic['rcons']=(1,5)\n pro_dic['B']=(10,20)\n pro_dic['p']=(10,20)\n pro_dic['g']=(10,20)\n def fx():\n h=np.random.random()\n def g(x):\n t=100*h*math.sin(h*x/10)+10\n return t\n return g\n def fy():\n h=np.random.random()\n def g(x):\n t=50*h*math.sin(h*x/5)-10\n return t\n return g\n pro_dic['x']=fx\n pro_dic['y']=fy\n pro_dic['w']=1\n pro_dic['alpha']=2\n pro_dic['twe']=(0,0)\n pro_dic['ler']=(0,0)\n num_pros=3\n pro_dics=[fpro_config(pro_dic) for _ in range(num_pros)]\n task_dic={}\n task_dic['ez']=(10,20)\n task_dic['rz']=(10,20)\n maxnum_tasks=4\n task_dics=[ftask_config(task_dic) for _ in range(maxnum_tasks)]\n job_d={}\n job_d['time']=(1,9)\n job_d['womiga']=(0.5,1)\n job_d['sigma']=(0.5,1)\n job_d['num']=(1,maxnum_tasks)\n job_dic=fjob_config(job_d)\n loc_config=floc_config()\n z=['Q','T','C','F']\n lams={x:1 for x in z}\n bases={x:1 for x in z}\n job_pros=CSENV(pro_dics,maxnum_tasks,task_dics,job_dic,loc_config,lams,100,bases,bases,[7],[9])\n state=job_pros.reset()\n A=state[0].reshape(num_pros,-1)\n A=np.around(A,2)\n l=list(np.arange(maxnum_tasks))\n ls=['er', 'econs', 'rcons', 'B', 'p', 'g', 'twe', 'ler', 'w', 'alpha','PF','Aq', 'x', 'y', 'vx','vy']\n ls.extend(l)\n pd.DataFrame(A,columns=ls,index=['pro_1','pro_2','pro_3']).to_csv('sample.csv')\n rand_agent=RANDOM_AGENT(maxnum_tasks)\n done=0\n while not done:\n action=rand_agent.take_action(state)\n state,_,done,_,_=job_pros.step(action)\n print(job_pros.tar_dic)\n print(job_pros.sum_tar)\n print(job_pros.tarb_dic)\n print(job_pros.sum_tarb)","repo_name":"zylovestt/CS","sub_path":"CS_ENV.py","file_name":"CS_ENV.py","file_ext":"py","file_size_in_byte":15071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10485565606","text":"\"\"\"empty message\n\nRevision ID: e39a711d4bec\nRevises: 35d81f89a521\nCreate Date: 2021-10-01 12:44:07.903266\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e39a711d4bec'\ndown_revision = '35d81f89a521'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('corporate_announcement', 'category',\n existing_type=sa.VARCHAR(),\n nullable=True)\n #op.drop_column('crm_contact', 'industry')\n #op.alter_column('personalised_video_invitee', 'account_id',\n # existing_type=sa.INTEGER(),\n # nullable=False)\n #op.create_foreign_key(None, 'personalised_video_invitee', 'account', ['account_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
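The Alembic migration interleaved here toggles the NOT NULL constraint on corporate_announcement.category symmetrically in upgrade and downgrade. A stripped-down sketch of that pattern; note it is not runnable on its own, since Alembic supplies the migration context at revision time:

from alembic import op
import sqlalchemy as sa

def upgrade():
    # Relax NOT NULL; the existing type is restated so autogenerate can
    # match the column definition.
    op.alter_column('corporate_announcement', 'category',
                    existing_type=sa.VARCHAR(), nullable=True)

def downgrade():
    # Mirror image: reinstate NOT NULL (this fails if NULLs were inserted
    # while the relaxed schema was live).
    op.alter_column('corporate_announcement', 'category',
                    existing_type=sa.VARCHAR(), nullable=False)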
###\n #op.drop_constraint(None, 'personalised_video_invitee', type_='foreignkey')\n #op.alter_column('personalised_video_invitee', 'account_id',\n # existing_type=sa.INTEGER(),\n # nullable=True)\n #op.add_column('crm_contact', sa.Column('industry', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.alter_column('corporate_announcement', 'category',\n existing_type=sa.VARCHAR(),\n nullable=False)\n # ### end Alembic commands ###\n","repo_name":"Witzcode0/Exchange-connect","sub_path":"migrations/versions/e39a711d4bec_.py","file_name":"e39a711d4bec_.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1173907337","text":"from enum import Enum\n\nclass State(Enum):\n NOT_INITIALIZED = 1\n IN_PROGRESS = 2\n ABANDONED = 3\n FINISHED = 4\n WAITING_REMOTE_MOVE = 5\n\nclass Move(Enum):\n CONSTRUCTION = 1\n CHANGE = 2\n GIVE_UP = 3\n INITIAL = 4","repo_name":"Livia-ferrao/INE5417-Engenharia_de_SoftwareI","sub_path":"code/classes/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"591503503","text":"import pandas as pd\nfrom pandas import Series, DataFrame\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport xlsxwriter\nimport numpy as np\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn import svm\nimport metrics\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\n\ntrain_data = DataFrame(pd.read_csv('/Users/qinanyu/Desktop/datapackages/insurance/train.csv'))\ntest_data = DataFrame(pd.read_csv('/Users/qinanyu/Desktop/datapackages/insurance/test.csv'))\n\n# 确定没有需要清洗的数据\ntrain_data.drop(\"id\",axis=1,inplace=True)\ntest_data.drop(\"id\",axis=1,inplace=True)\n'''\nprint('---------------------train data info---------------------')\nprint(train_data.describe())\nprint('----------------------test data info---------------------')\nprint(test_data.describe())\n'''\n\n#数据替换和清洗\ntrain_data['Gender']=train_data['Gender'].map({'Male':1,'Female':0})\ntrain_data['Vehicle_Age']=train_data['Vehicle_Age'].map({'> 2 Years':2,'1-2 Year':1,'< 1 Year':0})\ntrain_data['Vehicle_Damage']=train_data['Vehicle_Damage'].map({'Yes':1,'No':0})\n\ntest_data['Gender']=test_data['Gender'].map({'Male':1,'Female':0})\ntest_data['Vehicle_Age']=test_data['Vehicle_Age'].map({'> 2 Years':2,'1-2 Year':1,'< 1 Year':0})\ntest_data['Vehicle_Damage']=test_data['Vehicle_Damage'].map({'Yes':1,'No':0})\n\n\n'''\n#热力值关联分析\nsns.countplot(train_data['Response'],label=\"Count\")\nplt.show()\n#用热力图呈现相关性\ncorr = train_data[list(train_data.columns[0:10])].corr()\nplt.figure(figsize=(11,11))\nannot=True #显示每个方格的数据\nsns.heatmap(corr,annot=True)\nplt.show()\n'''\n#数据拆分\nfeatures = ['Gender', 'Age', 'Driving_License', 'Region_Code',\n 'Previously_Insured', 'Vehicle_Age', 'Vehicle_Damage',\n 'Annual_Premium', 'Policy_Sales_Channel', 'Vintage']\ntrain_x, test_x, train_y, test_y = train_test_split(train_data.loc[:,\n ['Gender', 'Age', 'Driving_License', 'Region_Code',\n 'Previously_Insured', 'Vehicle_Age', 'Vehicle_Damage',\n 'Annual_Premium', 
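The insurance script above encodes its string categoricals with Series.map before modeling. A tiny self-contained illustration; note that any value absent from the mapping dict silently becomes NaN, which is worth checking before training:

import pandas as pd

df = pd.DataFrame({'Gender': ['Male', 'Female', 'Male'],
                   'Vehicle_Age': ['> 2 Years', '1-2 Year', '< 1 Year']})

# Each category becomes its integer code; unmapped values turn into NaN.
df['Gender'] = df['Gender'].map({'Male': 1, 'Female': 0})
df['Vehicle_Age'] = df['Vehicle_Age'].map({'> 2 Years': 2, '1-2 Year': 1, '< 1 Year': 0})
print(df)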
'Policy_Sales_Channel', 'Vintage']],\n train_data.loc[:, ['Response']], test_size=0.25,\n random_state=33)\n\n\n#决策树算法\ndef reg_tree(train_x, test_x, train_y, test_y,train_data,test_data):\n #特征值选择\n\n train_features= train_data[features]\n train_labels = train_data['Response']\n\n\n dvec=DictVectorizer(sparse=False)\n # 代码中使用了 fit_transform 这个函数,它可以将特征向量转化为特征值矩阵\n train_features=dvec.fit_transform(train_features.to_dict(orient='record'))\n\n from sklearn.tree import DecisionTreeClassifier\n #构建ID3决策树\n clf= DecisionTreeClassifier(criterion='entropy')\n #决策树训练\n clf.fit(train_x, train_y)\n '''\n #决策树预测\n pred_labels = clf.predict(test_ss_x)\n\n from sklearn.model_selection import cross_val_score\n #使用K折交叉验证 统计决策树准确率\n print(u'cross_val_score准确率为 %.4lf' % np.mean(cross_val_score(clf, train_features, train_labels, cv=10)))\n mse = mean_squared_error(test_y, pred_labels)\n print(\"决策树均方误差 = \", round(mse, 2))\n print(\"决策树预测结果\",pred_labels)\n\n sns.countplot(test_y['Response'], label=\"Count\")\n plt.title(\"test-y\")\n plt.show()\n'''\n\n test_result = clf.predict(test_data)\n\n sns.countplot(test_result, label=\"Count\")\n plt.title(\"reg-tree\")\n plt.show()\n\nreg_tree(train_x, test_x, train_y, test_y,train_data,test_data)\n\n#KNN算法\ndef KNN(train_x, test_x, train_y, test_y,train_data,test_data):\n KNeighborsClassifier(n_neighbors=5000, weights='uniform', algorithm='auto', leaf_size=50)\n\n #采用Z-Score规范化\n ss = StandardScaler()\n train_ss_x = ss.fit_transform(train_x)\n test_ss_x = ss.transform(test_x)\n\n #创建KNN分类器\n knn = KNeighborsClassifier()\n knn.fit(train_ss_x, train_y)\n # predict_y = knn.predict(test_ss_x)\n # mse = mean_squared_error(test_y, predict_y)\n\n '''print(\"KNN准确率: %.4lf\" % accuracy_score(test_y, predict_y))\n print(\"KNN均方误差 = \", round(mse, 2))\n print(\"KNN预测结果\",predict_y)\n'''\n test_result = knn.predict(test_data)\n sns.countplot(test_result, label=\"count\")\n plt.title(\"KNN\")\n plt.show()\n\n'''\n data = pd.DataFrame({'KNN': test_result})\n datatoexcel = pd.ExcelWriter(\"KNNresult.xlsx\", engine='xlsxwriter')\n data.to_excel(datatoexcel, sheet_name='Sheet1')\n datatoexcel.save()\n'''\n\n\n#KNN(train_x, test_x, train_y, test_y,train_data,test_data)\n\n#Adaboost算法\ndef Adaboost(train_x, test_x, train_y, test_y,train_data,test_data):\n\n # 采用Z-Score规范化\n ss = StandardScaler()\n train_ss_x = ss.fit_transform(train_x)\n test_ss_x = ss.transform(test_x)\n\n # 使用AdaBoost分类模型\n ada = AdaBoostClassifier(n_estimators=2000, random_state=0)\n ada.fit(train_ss_x, train_y)\n '''\n pred_y = ada.predict(test_ss_x)\n mse = mean_squared_error(test_y, pred_y)\n print(\"均方误差 = \", round(mse, 2))\n print(\"预测数值\",pred_y)\n' '''\n\n test_result = ada.predict(test_data)\n\n sns.countplot(test_result, label=\"count\")\n plt.title(\"Ababoost\")\n plt.show()\n '''\n data = pd.DataFrame({'Adaboost':pred_y})\n datatoexcel = pd.ExcelWriter(\"Insurance_result1.xlsx\", engine='xlsxwriter')\n data.to_excel(datatoexcel, sheet_name='Sheet1')\n datatoexcel.save()\n '''\n\n#Adaboost(train_x, test_x, train_y, test_y,train_data,test_data)\n\n#SVM\ndef SVM(train_x, test_x, train_y, test_y,train_data,test_data):\n\n #创建SVM分类器\n model = svm.SVC()\n # 用训练集做训练\n model.fit(train_x, train_y)\n\n # 用测试集做预测\n # prediction = model.predict(test_x)\n test_result = model.predict(test_data)\n sns.countplot(test_result, label=\"count\")\n plt.title(\"SVM\")\n plt.show()\n\n #print('准确率: ', metrics.accuracy_score(prediction, test_y))\n\n#SVM(train_x, test_x, train_y, test_y,train_data,test_data)\n\n#随机森林\ndef 
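The KNN routine above standardizes features with fit_transform on the training split and transform (no refit) on the test split, which keeps test-set statistics out of the scaler. A compact runnable sketch on synthetic data standing in for the insurance features:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# Tiny synthetic stand-in for the real feature matrix.
rng = np.random.RandomState(0)
X_train, X_test = rng.rand(80, 4), rng.rand(20, 4)
y_train, y_test = rng.randint(0, 2, 80), rng.randint(0, 2, 20)

# Fit the scaler on the training split only, then reuse it on the test
# split: the same fit_transform / transform asymmetry the script relies on.
ss = StandardScaler()
knn = KNeighborsClassifier().fit(ss.fit_transform(X_train), y_train)
pred = knn.predict(ss.transform(X_test))
print('accuracy: %.2f' % accuracy_score(y_test, pred))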
Randforest(train_x, test_x, train_y, test_y,train_data,test_data):\n rf = RandomForestClassifier(random_state=1, criterion='gini')\n\n '''\n parameters = {\"n_estimators\": range(1, 11)}\n # 使用GridSearchCV进行参数调优\n clf = GridSearchCV(estimator=rf, param_grid=parameters)\n # 对数据集进行分类\n clf.fit(train_x, train_y)\n print(\"最优分数: %.4lf\" % clf.best_score_)\n print(\"最优参数:\", clf.best_params_)\n '''\n\n rf.fit(train_x,train_y)\n # pred_y= rf.predict(test_x)\n test_result= rf.predict(test_data)\n\n sns.countplot(test_result, label=\"count\")\n plt.title(\"Random Forest\")\n plt.show()\n\n\n#Randforest(train_x, test_x, train_y, test_y,train_data,test_data)","repo_name":"yuqinan/MLimplementation","sub_path":"PythonInsurance.py","file_name":"PythonInsurance.py","file_ext":"py","file_size_in_byte":7380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14393011180","text":"import random\n\nasync def main(self, message):\n # All the possible story strings that can be given\n all_action_strings = [\n \"disembowels a critically endangered gorrila\",\n \"derails the polar express using five pounds of sugar and a flame thrower\",\n \"starts WWIII\",\n \"eats a Big Mac medium meal and dies\",\n \"commits arson\",\n \"weaponizes a Toyota pickup truck\",\n \"single handedly wipes Kansas off the map\",\n \"renovates a garden for charity\",\n \"takes an old lady out for dinner\",\n \"fulfills a terminally ill child's dream of driving in a pink lamborghini\"\n ]\n # The amount of people to select\n num_people = 3\n # Get three random strings\n random_strings = random.sample(all_action_strings, num_people)\n # Random users that will be used in the message\n random_users = random.sample(message.guild.members, num_people)\n # The string that is returned at the end\n story_str = \"\"\n \n # Create the story\n for i in range(len(random_users)):\n user = random_users[i]\n string = random_strings[i]\n\n if (i == len(random_users) - 1):\n story_str += f\"and <@{user.id}> {string}\"\n else:\n story_str += f\"<@{user.id}> {string},\"\n story_str += \"\\n\"\n\n await message.channel.send(story_str)","repo_name":"Loes-Bois/Bot-tom-gear","sub_path":"commands/btm_gear.py","file_name":"btm_gear.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40708304802","text":"import matplotlib.pyplot as plt\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nimport os\nimport argparse\nimport h5py\n\n\ndef parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data\", type=str, default='teapot.obj',\n help=\"Specify the path to dataset (teapot.obj/dsprite.hdf5)\")\n parser.add_argument(\"--steps\", type=int, default=4,\n help=\"Steps per trajectory\")\n parser.add_argument(\"--trajs\", type=int, default=1000,\n help=\"Number of trajectories to generate\")\n parser.add_argument(\"--save_dir\", type=str, default='./trajs/',\n help=\"Path to save\")\n parser.add_argument(\"--composite_action\", action='store_true',\n help=\"Use composite action\")\n parser.add_argument(\"--latent_active\", type=int, nargs='+', default=[0, 1, 2, 3, 4],\n help=\"Active latents (only for dsprite)\")\n parser.add_argument(\"--rotate_translation\", action='store_true')\n return parser.parse_args()\n\n\ndef gen_teapot(args):\n \"\"\"\n Generates teapot trajectories with random policy\n The action space consists of 9 actions = 1 (idle) + 6 
(+-rotations x/y/z) + 2 (+- color increment)\n code from https://github.com/IndustAI/learning-group-structure/\n teapot object from https://graphics.stanford.edu/courses/cs148-10-summer/as3/code/as3/teapot.obj\n \"\"\"\n\n IMG_SIZE = 64\n\n triangles = []\n vertices = []\n with open(args.data) as f:\n for line in f:\n components = line.strip(' \\n').split(' ')\n if components[0] == \"f\": # face data\n # e.g. \"f 1/1/1/ 2/2/2 3/3/3 4/4/4 ...\"\n indices = list(map(lambda c: int(c.split('/')[0]) - 1, components[1:]))\n for i in range(0, len(indices) - 2):\n triangles.append(indices[i: i+3])\n elif components[0] == \"v\": # vertex data\n # e.g. \"v 30.2180 89.5757 -76.8089\"\n vertex = list(map(lambda c: float(c), components[1:]))\n vertices.append(vertex)\n vertices_init, triangles = np.array(vertices), np.array(triangles)\n\n angle = 2 * np.pi / 5\n colors = [\n [0, 0, 0],\n [255, 0, 0],\n [255, 255, 255],\n [0, 255, 0],\n [0, 0, 255]]\n\n matrices = [\n np.matrix([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]),\n np.matrix([[np.cos(angle), 0, np.sin(angle)],\n [0, 1, 0],\n [-np.sin(angle), 0, np.cos(angle)]]),\n np.matrix([[np.cos(angle), 0, -np.sin(angle)],\n [0, 1, 0],\n [np.sin(angle), 0, np.cos(angle)]]),\n np.matrix([[1, 0, 0],\n [0, np.cos(angle), np.sin(angle)],\n [0, -np.sin(angle), np.cos(angle)]]),\n np.matrix([[1, 0, 0],\n [0, np.cos(angle), -np.sin(angle)],\n [0, np.sin(angle), np.cos(angle)]]),\n np.matrix([[np.cos(angle), np.sin(angle), 0],\n [-np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]),\n np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]),\n np.matrix([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]),\n np.matrix([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]),\n ]\n imgs = []\n actions = []\n\n def vertices_to_img(v, c):\n # First, plot 3D image of a teapot and save as image\n\n x = np.asarray(vertices[:, 0]).squeeze()\n y = np.asarray(vertices[:, 1]).squeeze()\n z = np.asarray(vertices[:, 2]).squeeze()\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.grid(None)\n ax.axis('off')\n ax.set_xlim([-3, 3])\n ax.set_ylim([-3, 3])\n ax.set_zlim([0, 3])\n ax.plot_trisurf(x, z, triangles, y, shade=True, color='white')\n ax.view_init(100, angle)\n img_path = os.path.join(traj_path, f'teapot_{i}.png')\n plt.savefig(img_path)\n plt.close()\n\n # Then load the image, crop, resize it, and change background color\n\n img = Image.open(img_path).convert('RGB')\n img = img.crop((100, 0, 350, 258))\n img = img.resize((IMG_SIZE, IMG_SIZE))\n arr = np.array(img)\n arr = np.where(arr == [255, 255, 255], colors[color_index], arr)\n return arr\n\n for t in tqdm(range(args.trajs)):\n traj_path = os.path.join(args.save_dir, str(t))\n os.makedirs(traj_path, exist_ok=True)\n\n # reset\n color_index = 0\n vertices = np.copy(vertices_init)\n imgs.append(vertices_to_img(vertices, color_index))\n\n # randomize initial state\n for _ in range(10):\n action = random.randrange(9)\n if action == 7: # Change color by +1 increment\n color_index = (color_index + 1) % 5\n elif action == 8: # Change color by -1 increment\n color_index = (color_index - 1) % 5\n elif action in [1, 2, 3, 4, 5, 6]:\n # Change viewpoint of teapot\n vertices = vertices * matrices[action]\n\n for i in range(args.steps):\n\n action = random.randrange(9)\n\n if action == 7: # Change color by +1 increment\n color_index = (color_index + 1) % 5\n elif action == 8: # Change color by -1 increment\n color_index = (color_index - 1) % 5\n elif action in [1, 2, 3, 4, 5, 6]:\n # Change viewpoint of 
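Each viewpoint action in the teapot generator multiplies the vertex array by one of the fixed 2*pi/5 rotation matrices above, using the row-vector convention v' = v @ R. A minimal sketch of one y-axis rotation, kept in np.matrix form to mirror the script:

import numpy as np

angle = 2 * np.pi / 5
# Rotation about the y axis, matching one entry of the action table above.
R = np.matrix([[np.cos(angle), 0, np.sin(angle)],
               [0,             1, 0            ],
               [-np.sin(angle), 0, np.cos(angle)]])

vertices = np.matrix([[1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])
# Row-vector convention: the `vertices * matrices[action]` step in the script.
print(vertices * R)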
teapot\n vertices = vertices * matrices[action]\n\n actions.append(action)\n\n arr = vertices_to_img(vertices, color_index)\n imgs.append(arr)\n\n imgs = np.array(imgs).reshape(args.trajs, args.steps + 1, IMG_SIZE, IMG_SIZE, 3)\n actions = np.array(actions).reshape(args.trajs, args.steps)\n\n # Save trajectories\n np.savez(os.path.join(args.save_dir, 'trajs.npz'),\n imgs=imgs,\n actions=actions,\n n_actions=np.array(9))\n\n\ndef gen_dsprites(args):\n \"\"\"\n Generate trajectories from dsprites dataset\n\n Shape: square, ellipse, heart\n Scale: 6 values linearly spaced in [0.5, 1]\n Orientation: 40 values in [0, 2 pi]\n Position X: 32 values in [0, 1]\n Position Y: 32 values in [0, 1]\n \"\"\"\n\n N_LATENTS = 5\n T_RANGE = 5\n\n with h5py.File(args.data, 'r') as f:\n _imgs = f['imgs'][:]\n _lats = f['latents']['values'][:]\n\n periods = [3, 6, 40, 32, 32]\n\n def coord_to_idx(c):\n return (32 * 32 * 40 * 6) * coord[0] \\\n + (32 * 32 * 40) * coord[1] \\\n + (32 * 32) * coord[2] \\\n + 32 * coord[3] \\\n + coord[4]\n\n def action_map(action):\n q = action\n vec = []\n for _ in args.latent_active:\n q, r = divmod(q, 2 * T_RANGE + 1)\n vec.append(r - T_RANGE)\n return [0] * (N_LATENTS - len(args.latent_active)) + vec\n\n imgs, actions = [], []\n if args.composite_action:\n n_actions = (2 * T_RANGE + 1) ** len(args.latent_active)\n else:\n n_actions = len(args.latent_active) * 2 + 1\n\n for t in tqdm(range(args.trajs)):\n\n coord = [0, 2, 0, 0, 0]\n\n # randomize initial state\n for lat in args.latent_active:\n coord[lat] = random.randrange(periods[lat])\n\n imgs.append(_imgs[coord_to_idx(coord)])\n\n for i in range(args.steps):\n action = random.randrange(n_actions)\n\n if args.composite_action:\n d = action_map(action)\n for i in range(N_LATENTS):\n coord[i] = (coord[i] + d[i]) % periods[i]\n else:\n if action != 0:\n _idx, delta = divmod(action - 1, 2)\n delta = delta * 2 - 1\n latent_idx = args.latent_active[_idx]\n if (latent_idx == 3 or latent_idx == 4) and args.rotate_translation:\n if latent_idx == 3:\n if delta == 1:\n coord[3] = (coord[3] + 1) % periods[3]\n coord[4] = (coord[4] + 1) % periods[4]\n else:\n coord[3] = (coord[3] - 1) % periods[3]\n coord[4] = (coord[4] - 1) % periods[4]\n else:\n if delta == 1:\n coord[3] = (coord[3] + 1) % periods[3]\n coord[4] = (coord[4] - 1) % periods[3]\n else:\n coord[3] = (coord[3] - 1) % periods[3]\n coord[4] = (coord[4] + 1) % periods[3]\n else:\n coord[latent_idx] = (coord[latent_idx] + delta) % periods[latent_idx]\n imgs.append(_imgs[coord_to_idx(coord)])\n actions.append(action)\n\n imgs = np.array(imgs).reshape(args.trajs, args.steps + 1, 64, 64)\n actions = np.array(actions).reshape(args.trajs, args.steps)\n\n # Save trajectories\n np.savez(os.path.join(args.save_dir, 'trajs.npz'),\n imgs=imgs.astype(np.uint8),\n actions=actions,\n n_actions=np.array(n_actions))\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n os.makedirs(args.save_dir, exist_ok=True)\n if 'teapot' in args.data:\n print('Generating teapot trajectories...')\n gen_teapot(args)\n elif 'dsprites' in args.data:\n print('Generating dsprites trajectories...')\n gen_dsprites(args)\n else:\n raise ValueError\n","repo_name":"hamzakeurti/homomorphismvae","sub_path":"displacementae/data/gen_trajs.py","file_name":"gen_trajs.py","file_ext":"py","file_size_in_byte":9799,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"13468076288","text":"import streamlit as st\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import 
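coord_to_idx in the dsprites generator is a hand-rolled mixed-radix (row-major) flatten over the latent sizes [3, 6, 40, 32, 32]. numpy's ravel_multi_index computes the same offset, which makes a handy cross-check:

import numpy as np

periods = [3, 6, 40, 32, 32]  # latent sizes, largest stride first
coord = [1, 2, 0, 5, 7]

# Row-major strides: 6*40*32*32, 40*32*32, 32*32, 32, 1 - exactly the
# multipliers coord_to_idx hard-codes.
idx = np.ravel_multi_index(coord, periods)
manual = (32*32*40*6)*coord[0] + (32*32*40)*coord[1] + (32*32)*coord[2] + 32*coord[3] + coord[4]
print(idx, manual)  # identical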
Image\r\nfrom io import BytesIO\r\nimport base64\r\nfrom streamlit_cropper import st_cropper\r\n\r\n\r\nst.title(\"Make your own digital signature\")\r\nst.write(\"Upload image of your signature\")\r\nimg_file = st.file_uploader(\"Image file has to be either 'jpg', 'jpeg', or 'png'\", type=['jpg', 'jpeg', 'png'])\r\ncamera_file = None\r\nselect_camera = st.checkbox(\"Select to take a photo from your camera\", value = False)\r\nif select_camera:\r\n camera_file = st.camera_input(\"Photo of your signature\")\r\n\r\nrealtime_update = st.checkbox(label=\"Update in Real Time\", value=True)\r\n\r\ndef signature(img, selected, block_size = 25, choose_c = 10.0, thr = 127):\r\n sig_cr = img[:, :, ::1]\r\n sig_gs = cv2.cvtColor(sig_cr, 6)\r\n if selected == \"Binary\":\r\n revlt, sig_MASK = cv2.threshold(sig_gs, thr, 255, cv2.THRESH_BINARY_INV)\r\n bc, gc, rc = cv2.split(sig_cr)\r\n new_sig = [gc, bc, rc, sig_MASK]\r\n new_sig_merged = cv2.merge(new_sig, 4)\r\n else:\r\n sig_adaptive = cv2.adaptiveThreshold(sig_gs, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, block_size, choose_c)\r\n bc, gc, rc = cv2.split(sig_cr)\r\n new_sig = [gc, bc, rc, sig_adaptive]\r\n new_sig_merged = cv2.merge(new_sig, 4)\r\n return new_sig_merged[:, :, ::1]\r\n\r\ndef get_image_download_link(img, filename, text):\r\n buffered = BytesIO()\r\n img.save(buffered, format=\"PNG\")\r\n img_str = base64.b64encode(buffered.getvalue()).decode()\r\n href = f'{text}'\r\n return href\r\n\r\n\r\nif img_file:\r\n img = Image.open(img_file)\r\n if not realtime_update:\r\n st.write(\"Double click to save crop\")\r\n # Get a cropped image from the frontend\r\n cropped_img = st_cropper(img, realtime_update=realtime_update)\r\n st.write(\"Input\")\r\n _ = cropped_img.thumbnail((500, 500))\r\n st.image(cropped_img)\r\n cvt_img = np.array(cropped_img)\r\n save_it = st.checkbox(label=\"Press when finished with cropping\", value=False)\r\n if save_it:\r\n select_thr = st.selectbox(\"Choose thresholding method\", [\"Binary\", \"Adaptive\"])\r\n if select_thr == \"Binary\":\r\n thr = st.slider(\"Choose threshold value\", 0, 255, step=1, value=127)\r\n cvt_img_res = signature(cvt_img, select_thr, thr = thr)\r\n else:\r\n block_size = st.slider(\"Choose block size value\", 1, 51, step=2, value=25)\r\n choose_c = st.slider(\"Choose C value\", -15.0, 50.0, step = 0.1, value = 16.0)\r\n cvt_img_res = signature(cvt_img, select_thr, block_size, choose_c)\r\n st.write(\"Output\")\r\n st.image(cvt_img_res, width=500)\r\n out_image = Image.fromarray(cvt_img_res[:, :, ::1])\r\n st.markdown(get_image_download_link(out_image, \"your_signature.png\", 'Download Output Image'),\r\n unsafe_allow_html=True)\r\nelif camera_file:\r\n img = Image.open(camera_file)\r\n st.write(type(img))\r\n if not realtime_update:\r\n st.write(\"Double click to save crop\")\r\n # Get a cropped image from the frontend\r\n cropped_img = st_cropper(img, realtime_update=realtime_update)\r\n st.write(\"Input\")\r\n _ = cropped_img.thumbnail((500, 500))\r\n st.image(cropped_img)\r\n st.write(type(cropped_img))\r\n cvt_img = np.array(cropped_img)\r\n st.write(type(cvt_img))\r\n save_it = st.checkbox(label=\"Press when finished with cropping\", value=False)\r\n if save_it:\r\n select_thr = st.selectbox(\"Choose thresholding method\", [\"Binary\", \"Adaptive\"])\r\n if select_thr == \"Binary\":\r\n thr = st.slider(\"Choose threshold value\", 0, 255, step=1, value=127)\r\n cvt_img_res = signature(cvt_img, select_thr, thr = thr)\r\n else:\r\n block_size = st.slider(\"Choose block 
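The signature() helper above turns thresholded ink into the alpha channel of an RGBA image: THRESH_BINARY_INV makes strokes white and paper black, so the mask doubles as transparency. A self-contained sketch on a synthetic grayscale page (requires opencv-python):

import cv2
import numpy as np

# Synthetic grayscale "signature": a dark stroke on a light page.
page = np.full((64, 64), 220, dtype=np.uint8)
cv2.line(page, (5, 40), (60, 20), color=10, thickness=2)

# THRESH_BINARY_INV: ink becomes 255, paper becomes 0, i.e. an alpha mask.
_, mask = cv2.threshold(page, 127, 255, cv2.THRESH_BINARY_INV)

# Stack three color channels plus the mask into a transparent-background image.
bgr = cv2.cvtColor(page, cv2.COLOR_GRAY2BGR)
b, g, r = cv2.split(bgr)
rgba = cv2.merge([b, g, r, mask])
print(rgba.shape)  # (64, 64, 4)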
size value\", 1, 51, step=2, value=25)\r\n choose_c = st.slider(\"Choose C value\", -15.0, 50.0, step = 0.1, value = 16.0)\r\n cvt_img_res = signature(cvt_img, select_thr, block_size, choose_c)\r\n st.write(\"Output\")\r\n st.image(cvt_img_res, width=500)\r\n out_image = Image.fromarray(cvt_img_res[:, :, ::1])\r\n st.markdown(get_image_download_link(out_image, \"your_signature.png\", 'Download Output Image'),\r\n unsafe_allow_html=True)\r\n","repo_name":"RPalpatine/SignatureWebApp","sub_path":"SignatureApp.py","file_name":"SignatureApp.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2465664334","text":"import argparse\nimport csv\nimport math\nimport numpy as np\nimport sys\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom ggplot import *\ndef get_coefficients(data_len, results, ground_truth):\n check = {True:1, False:0}\n derived_clusters = [[0 for x in range(data_len)] for y in range(data_len)]\n ground_clusters = [[0 for x in range(data_len)] for y in range(data_len)]\n for i in range(data_len):\n for j in range(i,data_len):\n derived_clusters[i][j] = derived_clusters[j][i] = check[results[i] == results[j]]\n ground_clusters[i][j] = ground_clusters[j][i] = check[ground_truth[i][1] == ground_truth[j][1]]\n \n TP, TN, FP, FN = (0,)*4\n same_results = {False:'TP += 1', True:'TN += 1'}\n different_results = {False:'FP += 1', True:'FN += 1'}\n\n for i in range(data_len):\n for j in range(data_len):\n if derived_clusters[i][j] != ground_clusters[i][j]:\n exec(different_results[(derived_clusters[i][j] == 0 and ground_clusters[i][j] == 1)])\n else:\n exec(same_results[(derived_clusters[i][j] == 0)])\n\n rand_index = (float) (TP+TN) / (TP+FP+FN+TN)\n jaccard_coeff = (float) (TP) / (TP+FP+FN)\n return rand_index, jaccard_coeff\n#calculate and print Jaccard Index\ndef calc_jc_index(obtained_mat, given_mat):\n similar = np.sum(np.logical_and(obtained_mat,given_mat))\n non_similar = np.sum(np.logical_or(obtained_mat,given_mat))\n return (similar/ non_similar)\n\n# Convert labels to 2d sparse matrix\ndef convert_label_to_sparse(output_cluster):\n N = len(output_cluster)\n ouput_mat = np.zeros((N , N))\n for i in range(0,N):\n for j in range(0, N):\n if(output_cluster[i] == output_cluster[j]):\n ouput_mat[i,j] = 1\n \n return ouput_mat\n\ndef read_file(file_path):\n with open(file_path) as file:\n return list(csv.reader(file, delimiter=\"\\t\"))\n \n''' Credit for the plot : https://stackoverflow.com/questions/21654635/scatter-plots-in-pandas-pyplot-how-to-plot-by-category ''' \n\ndef plot_graphs(l1, l2, y, name, data_set):\n df = pd.DataFrame(dict(x=l1, y=l2, label=y))\n g = ggplot(aes(x='x', y='y', color='label'), data=df) + geom_point(size=50) + theme_bw()\n g.save(name+\"_\"+data_set+\".jpg\")\n\n\ndef recursive_dendrogram(data_distances):\n global total_clusters, clusters, current_count\n\n min_distance=1000\n min_first_d, min_second_d=(-1,)*2\n \n for k in range (0, current_count):\n for l in range (0, current_count):\n if(k!=l):\n if(min_distance>data_distances[k][l]):\n min_distance=data_distances[k][l]\n min_first_d=k\n min_second_d=l\n\n if min_first_d==-1 or min_second_d==-1:\n return\n\n clusters[total_clusters]=[]\n clusters[total_clusters].append(data_distances[min_first_d][current_count])\n clusters[total_clusters].append(data_distances[min_second_d][current_count])\n 
data_distances[min_first_d][current_count]=total_clusters\n total_clusters+=1\n\n for m in range (0, current_count):\n if(m!=min_first_d or m!=min_second_d):\n temp_min = min(data_distances[min_first_d][m],data_distances[min_second_d][m])\n data_distances[min_first_d][m]= temp_min\n data_distances[m][min_first_d]=data_distances[min_first_d][m]\n\n data_distances=np.delete(data_distances,min_second_d,0)\n data_distances=np.delete(data_distances,min_second_d,1)\n current_count-=1\n\n recursive_dendrogram(data_distances)\n\n return\n\nif __name__ == \"__main__\":\n\n files = ['new_dataset_2.txt']\n for file in files:\n data = read_file(file)\n data_len = len(data)\n num_clusters=3\n dimensions = len(data[0])\n dimensions = dimensions-2\n\n new_data_set=np.zeros((data_len, dimensions+3))\n for i in range (0, data_len):\n for j in range (0, dimensions+2):\n new_data_set[i][j]=data[i][j]\n\n data_distances= euclidean_distances(new_data_set[0:data_len,2:dimensions+2], new_data_set[0:data_len,2:dimensions+2])\n stacking = 'np.append(data_distances,np.ones([len(data_distances),1]),1)'\n data_distances=np.append(data_distances,np.ones([len(data_distances),1]),1)\n data_distances=np.append(data_distances,np.ones([len(data_distances),1]),1)\n\n for j in range (0, data_len):\n data_distances[j][data_len]=j+1\n data_distances[j][data_len+1]=-1\n\n\n total_clusters=data_len+1\n clusters={}\n current_count=data_len\n\n recursive_dendrogram(data_distances)\n clusterinfo=np.zeros(shape=(total_clusters-num_clusters+2,1))\n clusterinfo[0]=1\n first=total_clusters-num_clusters\n current_cluster=1\n cluster_cloud={}\n\n while first>0:\n cluster_numbers=[]\n clusterinfo[first]=1\n while len(clusters[first])>0:\n num_cluster=int(clusters[first].pop())\n clusterinfo[num_cluster]=1\n if num_cluster>data_len:\n for j in range (0, len(clusters[num_cluster])):\n clusters[first].append(clusters[num_cluster][j])\n else:\n cluster_numbers.append(num_cluster)\n cluster_cloud[current_cluster]=cluster_numbers\n current_cluster+=1\n\n while clusterinfo[first]==1:\n if first>data_len+1:\n first=first-1\n else:\n first=0\n break\n\n this_cluster=1\n\n for c in cluster_cloud:\n for j in range(0, len(cluster_cloud[c])):\n new_data_set[cluster_cloud[c][j]-1][dimensions+2]=this_cluster\n this_cluster=this_cluster+1\n\n \n results = new_data_set[:,-1]\n results = [int(i) for i in results]\n print(results)\n results_sparse = convert_label_to_sparse(results)\n\n ground_truth_sparse = convert_label_to_sparse(np.array(data)[:,1])\n \n rand_index, jaccard_coeff = get_coefficients( data_len, results,data)\n \n jaccard_coeff = calc_jc_index( results_sparse, ground_truth_sparse)\n pca = PCA(n_components=2)\n input_vector = pca.fit_transform(np.array(data)[:,2:np.array(data).shape[1]])\n plot_graphs(input_vector[:,0],input_vector[:,1], results, \"dbscan\",file)\n \n \n print (\"####### Results for : \"+ file + \" ##############\")\n print ( \" Jaccard Coeff:\"+ str(jaccard_coeff) + \" rand index: \"+ str(rand_index) )\n ","repo_name":"roopaliv/dbscan-and-agglomerative-clustering","sub_path":"agglo.py","file_name":"agglo.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41318641506","text":"###\n#-------------------------------------------------------------------------------\n# problem3.py\n#-------------------------------------------------------------------------------\n#\n# Author: Alwin Tareen\n# Created: Feb 04, 2022\n# Execution: 
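calc_jc_index in the clustering script above compares two clusterings through their pairwise co-membership matrices. Those matrices can be built by broadcasting instead of the O(n^2) Python loops in convert_label_to_sparse; a small sketch with hypothetical labels (diagonal self-pairs are counted, as in the original):

import numpy as np

# Two clusterings of five points, as integer labels.
pred = np.array([0, 0, 1, 1, 2])
truth = np.array([0, 0, 0, 1, 1])

# Co-membership matrices: M[i, j] == 1 iff points i and j share a cluster.
pred_mat = (pred[:, None] == pred[None, :]).astype(int)
truth_mat = (truth[:, None] == truth[None, :]).astype(int)

# Same intersection-over-union ratio calc_jc_index computes.
jaccard = np.logical_and(pred_mat, truth_mat).sum() / np.logical_or(pred_mat, truth_mat).sum()
print(jaccard)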
python3 problem3.py\n#\n# This program determines the fixed monthly payment needed to pay off a debt.\n#\n##\n\ndef bisectionPayoff(balance, annualInterestRate):\n monthlyInterestRate = annualInterestRate / 12.0\n initialBalance = balance\n lowerBound = balance / 12.0\n upperBound = (balance * (1 + monthlyInterestRate)**12) / 12.0\n fixedMonthlyPayment = 0\n\n while not (0.0 <= balance <= 0.01):\n balance = initialBalance\n fixedMonthlyPayment = (lowerBound + upperBound) / 2.0\n for month in range(1, 13):\n monthlyUnpaidBalance = balance - fixedMonthlyPayment\n balance = monthlyUnpaidBalance + (monthlyInterestRate * monthlyUnpaidBalance)\n if balance < 0.0:\n upperBound = fixedMonthlyPayment\n else:\n lowerBound = fixedMonthlyPayment\n return f'Lowest payment: {round(fixedMonthlyPayment, 2)}'\n","repo_name":"altareen/mitopencourseware","sub_path":"introToPython6.00.1x/problemSet2/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15925972324","text":"# A for Rock, B for Paper, and C for Scissors (opponent)\n# X means you need to lose, Y means you need to end the round in a draw, and Z means you need to win\n\nshapePoint = {\n 'rock' : 1,\n 'paper' : 2,\n 'scissor' : 3\n}\n\ngamePoint = {\n 'lost' : 0,\n 'draw' : 3,\n 'win' : 6\n}\n\nopponentShapes = {\n 'A' : 'rock',\n 'B' : 'paper',\n 'C' : 'scissor'\n}\n\nmyMoves = {\n 'X' : 'lost',\n 'Y' : 'draw',\n 'Z' : 'win'\n}\n\ndef loseShape(shape: str) -> str:\n '''Returns shape that loses to the argument shape'''\n return {\n 'rock' : 'scissor',\n 'paper' : 'rock',\n 'scissor' : 'paper'\n }[shape]\n \ndef winShape(shape: str) -> str:\n '''Returns shape that wins the argument shape'''\n return {\n 'rock' : 'paper',\n 'paper' : 'scissor',\n 'scissor' : 'rock'\n }[shape]\n \npoints = 0\n \nwith open(\"inputs/tehtava2_input.txt\", \"r\") as f:\n lines = f.readlines()\n for line in lines:\n opponentShape = opponentShapes[line[0]]\n myMove = myMoves[line[2]]\n if (myMove == 'win'):\n points += gamePoint['win']\n points += shapePoint[winShape(opponentShape)]\n elif (myMove == 'draw'):\n points += gamePoint['draw']\n points += shapePoint[opponentShape]\n elif (myMove == 'lost'):\n points += gamePoint['lost']\n points += shapePoint[loseShape(opponentShape)]\n print(f'mymove {myMove} loseshape {loseShape(opponentShape)} ')\n\nprint(points)\n \n \n \n \n ","repo_name":"svhein/Advent_of_Code","sub_path":"2022/paiva2_2.py","file_name":"paiva2_2.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2976960617","text":"\"\"\" \nWritten by: Samavedam Manikhanta Praphul\nVersion: 1.0\nDescription: \nThis file trains a CNN architecture based model for \nMNIST dataset based on the system arguments passed. 
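bisectionPayoff above works because the year-end balance is a continuous, strictly decreasing function of the fixed payment, so bisection on the payment bracket must converge. A generalized sketch of the same search (parameter names are illustrative):

def bisect_payment(balance, annual_rate, tol=0.01):
    # Bracket the answer: paying balance/12 is too little, paying the fully
    # compounded balance over 12 months is more than enough.
    monthly = annual_rate / 12.0
    lo = balance / 12.0
    hi = balance * (1 + monthly) ** 12 / 12.0
    while True:
        payment = (lo + hi) / 2.0
        b = balance
        for _ in range(12):
            # Same update as the original: pay, then accrue interest.
            b = (b - payment) * (1 + monthly)
        if 0.0 <= b <= tol:
            return round(payment, 2)
        lo, hi = (payment, hi) if b > 0 else (lo, payment)

print(bisect_payment(320000, 0.2))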
\n\"\"\"\n\n# Importing required packages\n# Third party imports\nimport torch # For PyTorch functionalities\nfrom torchviz import make_dot\n\n# Local imports\nfrom models import BaseNetwork\nfrom utils import parse_arguments, test_network, train_network_single_epoch, visualize_errors_over_training\nfrom utils import get_mnist_data_loaders, visualize_data_loader_data\n\n\ndef main():\n \"\"\"This is the function which runs when run as a standalone script.\n Returns 0 if the script exits successfully.\n \"\"\"\n\n # Parse the command line arguments for get defaults.\n desc = \"Trains basic neural network on MNIST for 5 epochs overwriting defaults by command line\"\n samples, learning_rate, momentum, log_interval, train_batch_size,\\\n test_batch_size, number_of_epochs = parse_arguments(description=desc)\n\n # Disable the cudnn\n torch.backends.cudnn.enabled = False\n\n # Setting the seed for reproducibility of results.\n random_seed = 45\n torch.manual_seed(random_seed)\n\n # Get the MNIST data if data doesn't exist\n train_data_loader, test_data_loader = get_mnist_data_loaders(train_batch_size=train_batch_size,\n test_batch_size=test_batch_size)\n\n #Visualize the data\n visualize_data_loader_data(train_data_loader, samples, \"Visualizing first 8 train data points\")\n visualize_data_loader_data(test_data_loader, samples, \"Visualizing first 8 test data points\")\n\n # Define the model, optimizer and loss function\n model = BaseNetwork()\n optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate, momentum=momentum)\n\n #Visualize the model\n for _, (image_data, _) in enumerate(train_data_loader):\n yhat = model(image_data)\n make_dot(yhat, params=dict(model.named_parameters())).render(\"base_network\",format=\"png\")\n break\n\n # Placeholders of training loss, testing loss along with indices\n train_losses = []\n train_indices = []\n test_losses = []\n test_indices = [epoch*len(train_data_loader.dataset) for epoch in range(number_of_epochs+1)]\n\n # Test error without training the model\n test_loss, _ = test_network(model=model, test_data_loader=test_data_loader)\n test_losses.append(test_loss)\n\n #Train the network for number of epochs\n for epoch in range(1, number_of_epochs+1, 1):\n losses, counter = train_network_single_epoch(model=model, train_data_loader=train_data_loader,\n optimizer=optimizer, log_interval = log_interval,\n epoch = epoch, batch_size=train_batch_size)\n train_losses.extend(losses)\n train_indices.extend(counter)\n test_loss, _ = test_network(model=model, test_data_loader=test_data_loader)\n test_losses.append(test_loss)\n\n # Visualize the training of the model over epochs\n visualize_errors_over_training(train_idx=train_indices, train_errors=train_losses, test_idx=test_indices, test_errors=test_losses)\n\n # Store the model\n torch.save(model.state_dict(), 'models/final_model.pth')\n print(\"Successfully saved the model at models/final_model.pth\")\n\n # Standard exit status is 0\n return 0\n\nif __name__ == \"__main__\":\n main()\n ","repo_name":"PraphulSamavedam/CS-5330-Pattern-Recognition-and-Computer-Vision","sub_path":"Assignment5/CharacterRecognition/src/train_basic.py","file_name":"train_basic.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71938552584","text":"import re\nimport os\nfrom itertools import count\nfrom language.heuristic.littletools.nested_list_tools import flatten_reduce, curry\nimport fnmatch\n\nimport pandas as pd\nimport 
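The trainer above delegates to helpers from utils.py that are not shown in this record; the core of any such epoch loop is the zero_grad / forward / backward / step cycle. A minimal stand-in with a fake batch, assuming nothing about the real helper signatures:

import torch

# Minimal stand-ins for the pieces the script wires together.
model = torch.nn.Linear(784, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
loss_fn = torch.nn.CrossEntropyLoss()

images = torch.randn(32, 784)          # fake batch standing in for MNIST
labels = torch.randint(0, 10, (32,))

# One optimization step, the heart of a train_network_single_epoch-style loop:
optimizer.zero_grad()                  # clear gradients from the last batch
loss = loss_fn(model(images), labels)  # forward pass + loss
loss.backward()                        # backpropagate
optimizer.step()                       # SGD update
print(float(loss))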
copy\n\nimport logging\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\nimport spacy\nnlp = spacy.load('en_core_web_sm')\n\ndef find_position_in_doc_by_approx(doc, text_token, pos, deviation=10):\n deviator = iterate_away(pos, deviation)\n for p in deviator:\n if p < 0:\n continue\n if p >= len(doc):\n continue\n if text_token == doc[p].text or ((text_token in ['’'] or len(text_token)<2) and text_token in doc[p].text):\n return p\n else:\n logging.error(\"Token '%s' not seen in spacy doc (search tokens: '%s')! returning starting position, '%s\" %\n (text_token,\n str([w.text for w in doc[pos - deviation:pos + deviation]]),\n str(doc[pos])))\n return pos\n\ndef iterate_away(pos, deviation):\n yield pos\n for d in range(1, deviation):\n yield pos + d\n yield pos - d\n\nclass CorpusReader:\n def __init__(self, corpus_path=None, only=None):\n ''' This module reads conll files into a DataFrama.\n\n Conll files have here to be read, they are written line per word.\n It is collapsed into one line per sentence here.\n\n Also some datatypes are parsed into a more appropriate datastructure than strings\n The spacy doc's are build here, overwritten with grammar from the conll files to use the datastructure of spracy.\n Also the coreference mention tags are parsed to some dict.\n\n :param corpus_path: path to the dir that contains a folder 'import', where the conll files are\n :param only: restrict the conll files that should be read.\n\n '''\n if not corpus_path:\n raise AttributeError(\"path must be given!\")\n self.corpus_path = corpus_path\n\n # loand conlls into dicts and build dataframe with one line per word\n all_sentences, all_conlls = self.load_all_conlls(corpus_path, only=only)\n self.df = pd.DataFrame(all_conlls)\n\n # translate also normal conll-nodes as well as invisible nodes to some nice id\n # groupby makes the grouped value invisible to the function, copy it\n self.df['s_id2'] = self.df ['s_id'] #\n self.invisible_translation_dict = {}\n self.df = self.df.groupby('s_id').apply(lambda x: self.renumerate_word_ids(x, update_translation_dict=self.invisible_translation_dict))\n\n # parse columns to int\n num_cols = [\"s_id\", \"head_id\", \"id\"]\n self.df[num_cols] = self.df[num_cols].astype(int)\n\n # from dataframe with one line per word get another with one line to one sentence\n self.sentence_df = self.df.groupby('s_id').apply(self.aggregate_sentences)\n \n\n def renumerate_word_ids(self, x, update_translation_dict=None):\n ''' In conll files normal id's are ints, but invisible nodes have to be re-enumerated.\n\n If you annotated `hidden nodes` with this syntax for the id: '1.1', they are recounted here.\n\n See this discussion here:\n .. 
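iterate_away above defines the probe order that find_position_in_doc_by_approx uses to search outward from an estimated token position. Its visit order is easiest to see by materializing the generator:

def iterate_away(pos, deviation):
    # Yields pos, pos+1, pos-1, pos+2, pos-2, ... spiraling outward.
    yield pos
    for d in range(1, deviation):
        yield pos + d
        yield pos - d

print(list(iterate_away(5, 3)))  # [5, 6, 4, 7, 3]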
_Null subjects: http://www.python.org/https://github.com/UniversalDependencies/docs/issues/589\n That also happens, if there are coordinative bindings in the sentences, that re-use the tokens for their\n grammatical well-formedness\n\n :param x: pandas Series of Enhanced UD\n :param update_translation_dict: collect all the ids translated, for translating coref, that must happen after translating everything\n :return: index list\n\n '''\n try:\n s_id = x['s_id2'].iloc[0]\n except KeyError:\n raise\n invisible_nodes_translation_dict = dict(zip(x['inv_id'],list(range(len(x)))))\n update_translation_dict.update({s_id:invisible_nodes_translation_dict})\n x['id'] = invisible_nodes_translation_dict.values()\n try:\n x['head_id'] = [ invisible_nodes_translation_dict[h] for h in x['inv_head_id']]\n except KeyError:\n a = 1\n return x\n\n\n def override_spacy (self, tldpth):\n t = tldpth[-1]\n t.lemma_ = tldpth [0]\n t.dep_ = tldpth [1]\n t.pos_ = tldpth[2]\n t.tag_ = tldpth[3]\n t.head = t.doc[tldpth[4]]\n return t\n\n\n def aggregate_sentences(self, x):\n doc = nlp(' '.join(x['text']))\n doc = [self.override_spacy (tup) for tup in zip(x['lemma'], x['dep_'], x['pos_'], x['tag_'], x['head_id'], doc)]\n corefs = self.parse_coref_str(x['coref'])\n return pd.Series({'text': ' '.join(x['text']),\n 'text_pos_': ' '.join([text + '_' + pos for text, pos in zip(x['text'], x['pos_'])]),\n 's_id': x['s_id2'].iloc[0],\n 'spacy_doc': doc,\n 'coref': corefs\n })\n\n def parse_coref_str(self, coref_string):\n ''' parses coref mentions in two formats. Either ranges or lists of token indices\n\n 7->[6,1,2,3] makes s_id = 7, i_list = [5,0,1,2]\n 1->[22:23] makes s_id = 1, i_list = [21]\n 8->[12.1:15] makes s_id = 8, i_list = []\n\n :param coref_string: such a string\n :return: coref dict with 's_id' and 'i_list'\n\n '''\n mfas = [re.finditer(r\"(((?P\\d+)->\\[(?P\\d+(?:\\.\\d+)?):(?P\\d+(?:\\.\\d+)?)\\])|((?P\\d+(?:\\.\\d+)?)->(?P\\[(?:\\d+(?:\\.\\d+)?)(?:,\\s*\\d+(?:\\.\\d+)?)*\\])))+\", y) for y in coref_string]\n return [[self.parse_coref_dict(m.groupdict()) for m in mfa] if mfa else [] for mfa in mfas]\n\n\n invisible_node = re.compile(r\"\\d+(?:\\.\\d+)?\")\n def parse_invisible_node(self, s_id, node):\n try:\n return self.invisible_translation_dict [s_id][node]\n except:\n raise\n\n\n def parse_coref_dict(self, d):\n ''' Parses the content of the coref_dict to some typed data\n\n :param d: dict with either 's_id_r' and 'm_start', 'm_end' or 's_id_i' and 'i_list'\n :return: coref dict with 's_id' and 'i_list'\n\n '''\n if d['s_id_r']:\n s_id = d['s_id_r']\n try:\n m_start = self.parse_invisible_node(s_id, d['m_start'])\n m_end = self.parse_invisible_node(s_id, d['m_end'])\n return {'s_id' : int(s_id),\n 'i_list' : list(range (m_start, m_end))}\n except KeyError:\n logging.warning('Coreference out of read text window. Ignoring this. 
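The mention parser above leans on named regex groups plus groupdict() to distinguish the range form s->[a:b] from the list form s->[i,j,...]. A reduced sketch covering just the range form:

import re

# Simplified version of the mention pattern above, range form only.
pat = re.compile(r"(?P<s_id>\d+)->\[(?P<m_start>\d+):(?P<m_end>\d+)\]")
m = pat.search("1->[22:23]")
print(m.groupdict())  # {'s_id': '1', 'm_start': '22', 'm_end': '23'}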
')\n return {}\n elif d['s_id_i']:\n s_id = d['s_id_i']\n nodes = self.invisible_node.finditer(d['i_list'])\n i_list = [self.parse_invisible_node(s_id, i.group(0)) for i in nodes]\n return {'s_id': int(s_id),\n 'i_list': i_list}\n else:\n raise ValueError ('coref dict with bad indices')\n\n\n def lemmatize_text(self):\n return \" \".join(self.df['lemma'].tolist())\n\n def read_one_conll (self,fname, s_id_dict):\n sentence = []\n conll_lines = []\n\n with open(fname, 'r') as fh:\n for i, line in enumerate (fh):\n try:\n sentence.append(re.search(r'(?:^\\d+\\.?\\d*\\t)([^\\t]+)', line).group(1))\n conll_line_dict = self.conll_line2match(line).groupdict()\n conll_line_dict.update(s_id_dict)\n conll_lines.append(conll_line_dict)\n except AttributeError:\n raise SyntaxError(\n \"wrong syntax in file %s, line no. %d line:\\n'%s' (Maybe an empty\"\n \"line at the end of your conll\" % (fname, i, line))\n if not line.strip():\n line = last\n break\n last = line\n pass\n\n return conll_lines, \" \".join(sentence)\n\n def load_all_conlls (self, path, only=None):\n all_sentences = []\n all_conlls = []\n\n def hasNumbers(inputString):\n return any(char.isdigit() for char in inputString)\n\n for filename in sorted(os.listdir(path), key =lambda x: int(''.join(filter(str.isdigit , x))) if hasNumbers(x) else 0):\n if fnmatch.fnmatch(filename, '*.conll'):\n s_id_dict = re.search(\"(?P[0-9]+)\" , filename).groupdict()\n if (only and int (s_id_dict['s_id']) not in only):\n continue\n filename = os.path.join(path, filename)\n conll_lines, sentence = self.read_one_conll(filename, s_id_dict)\n all_sentences.append (sentence)\n all_conlls.append (conll_lines)\n all_conlls = flatten_reduce(all_conlls)\n\n return all_sentences, all_conlls\n\n def load_conll (self, i, corpus_path):\n if isinstance(i, list):\n docs = []\n for j in i:\n print (j)\n docs.append(self.load_conll(j, corpus_path))\n return docs\n\n fname = corpus_path + \"/\" + str (i) + '.conll'\n sentence = []\n last = ''\n with open(fname, 'r') as fh:\n for line in fh:\n try:\n sentence.append(re.search(r'(?:^\\d+\\t)([^\\t]+)', line).group(1))\n except AttributeError:\n print (i, \"'\"+line+\"'\")\n raise\n if not line.strip():\n line = last\n break\n last = line\n\n pass\n doc = self.nlp(\" \".join(sentence))\n new_doc = self.conll_over_spacy(doc, fname)\n return new_doc\n\n pattern = re.compile( r\"\"\"(?P(?P\\d+)((?:\\.)?(?P\\d+))?) # i (as well es hidden node-ids in conll-u format)\n \\t(?P.*?) # whitespace, next bar, n1\n \\t(?P.*?) # whitespace, next bar, n1\n \\t(?P.*?) # whitespace, next bar, n2\n \\t(?P.*?) # whitespace, next bar, n1\n \\t(?P.*?)# whitespace, next bar, n1\n \\t(?P(?P\\d+)((?:\\.)?(?P\\d+))?) # head_id (as well es hidden node-ids in conll-u format)\n \\t(?P.*?) 
# whitespace, next bar, n2\n \\t(?P.*?)# whitespace, next bar, n1\n \\t(?P.*)# whitespace, next bar, n1\n \"\"\", re.VERBOSE)\n\n def conll_line2match(self, line):\n match = self.pattern.match(line)\n return match\n\n col_set = ['i','text', 'lemma','pos','tag','nothing','head','dep','spacy_i','coref']\n def conll_over_spacy(self, doc, dir, i, no_cols={}):\n to_change = set(self.col_set) - set(no_cols)\n fname = str (i) + '.conll'\n path = dir + \"/\" + fname\n\n # read conll_files, may manipulated over spacy\n with open(path) as f:\n for line in f:\n match = self.conll_line2match(line)\n i = int(match.group(\"id\")) - 1\n head_i = int(match.group(\"head_id\")) - 1\n doc[i].set_extension('coref', default = list(), force=True)\n try:\n if 'head' in to_change:\n doc[i].head = doc[head_i]\n if 'lemma' in to_change:\n doc[i].lemma_ = match.group(\"pos_\")\n if 'pos' in to_change:\n doc[i].pos_ = match.group(\"pos_\")\n if 'tag' in to_change:\n doc[i].tag_ = match.group(\"tag_\")\n if 'dep' in to_change:\n doc[i].dep_ = match.group(\"dep_\")\n #if 'spacy_i' in to_change:\n # doc[i].i = match.group(\"spacy_i\")\n if 'coref' in to_change:\n doc[i]._.coref= match.group(\"coref\")\n\n except IndexError:\n raise ValueError(\"Shape of the spacy doc and conll file incongruent, look for the number of tokens! '%s'\" % (str(doc)))\n return doc\n\n def coref_lookup(self, corefs, what= None):\n if not corefs:\n return []\n for coref in corefs:\n s_id = coref['s_id']\n m_start = coref['m_start']\n m_end = coref['m_end']\n if what ==\"sub_pred\":\n all_subpreds = flatten_reduce(flatten_reduce(\n [[p['part_predications'] for p in ps]\n for ps in self.sentence_df.query(\"s_id == @s_id\")['predication']]\n ))\n return [sp for sp in all_subpreds if all([m in sp['full_ex_i'] for m in range(m_start, m_end)])]\n else:\n return self.sentence_df.query(\"s_id == @s_id\")['spacy_doc'].values[0][m_start-1:m_end-1]\n\n conll_format = \"%d\\t%s\\t%s\\t%s\\t%s\\t%s\\t%d\\t%s\\t%s\\t%s\"\n def export_dict (self, doc, index=None):\n res = []\n w_counter = count(0)\n\n for word in doc:\n i = next(w_counter)\n if word.head is word:\n head_idx = 0\n else:\n head_idx = doc[i].head.i+1\n #coref = self.extract_coref_from_spacy_neucoref (doc, word, i)\n\n res.append(\n { 's_id' : index,\n 'i' : i+1,\n 'text' : word.text,\n 'lemma' : word.lemma_,\n 'pos' : word.pos_, #\n 'tag' : word.tag_, #\n 'unknown': '_',\n 'head' : head_idx,\n 'dep' : word.dep_, # Relation\n 'corp_id': str(index)+'-'+str(word.i), # Generation_i\n 'doc_i' : word.i,\n #'coref' : coref\n }\n )\n return res\n\n\n def explode(df, column_to_explode):\n \"\"\"\n Similar to Hive's EXPLODE function, take a column with iterable elements, and flatten the iterable to one element\n per observation in the output table\n\n :param df: A dataframe to explod\n :type df: pandas.DataFrame\n :param column_to_explode:\n :type column_to_explode: str\n :return: An exploded data frame\n :rtype: pandas.DataFrame\n \"\"\"\n\n # Create a list of new observations\n new_observations = list()\n\n # Iterate through existing observations\n for row in df.to_dict(orient='records'):\n\n # Take out the exploding iterable\n explode_values = row[column_to_explode]\n del row[column_to_explode]\n\n # Create a new observation for every entry in the exploding iterable & add all of the other columns\n for explode_value in explode_values:\n # Deep copy existing observation\n new_observation = copy.deepcopy(row)\n\n # Add one (newly flattened) value from exploding iterable\n 
new_observation[column_to_explode] = explode_value\n\n # Add to the list of new observations\n new_observations.append(new_observation)\n\n # Create a DataFrame\n return_df = pd.DataFrame(new_observations)\n\n # Return\n return return_df\n\n def annotate_corefs (self, doc, df):\n df['coref'] = [[] for _ in range(len(df))]\n\n def element_rest (l):\n for i, e in enumerate (l):\n yield e, l[:i]+l[i+1:]\n def ref_from_row (r):\n try:\n row = df.query('doc_i in @r')\n except KeyError:\n print (\"not found?\")\n if len (row.s_id.values) == 0 :\n ba =' ta'\n return 'out of range?'\n return str(row.s_id.values[0]) + \"->\" + str(row.i.values[0])\n\n return \",\".join(other_sents)\n\n if doc._.has_coref:\n for cl in doc._.coref_clusters:\n for ment, rest_ments in element_rest (cl):\n ids = range(ment.start, ment.end)\n other_sents = [ref_from_row(range(r.start, r.end)) for r in rest_ments]\n df.loc[df['doc_i'].isin(ids), 'coref'] += other_sents\n\n df.coref = df.coref.apply (lambda x: \",\".join(x) if x else '_')\n return None\n\n def write_conll_by_df_group(self, x):\n x = x\n conll_lines = []\n for row in x.itertuples():\n conll_lines.append(CorpusReader.conll_format % (\n row.i, # There's a word.i attr that's position in *doc*\n row.text,\n row.lemma,\n row.pos, # Coarse-grained tag\n row.tag, # Fine-grained tag\n row.unknown,\n row.head,\n row.dep, # Relation\n row.corp_id, # Generation_i\n row.coref))\n\n conll_path = self.export_dir + '/' + str(row.sent_id) + '.conll'\n with open(conll_path, 'w+') as f:\n f.write (\"\\n\".join (conll_lines) +'\\n')\n\n return None\n\n\n\n\n","repo_name":"c0ntradicti0n/distinctiopus","sub_path":"corpus_reader.py","file_name":"corpus_reader.py","file_ext":"py","file_size_in_byte":17432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7657735558","text":"\"\"\"\nGiven an array with n objects colored red, white or blue, sort them so that\nobjects of the same color are adjacent, with the colors in the order red, white\nand blue.\n\nHere, we will use the integers 0, 1, and 2 to represent the color red, white,\nand blue respectively.\n\"\"\"\n\n\ndef sort_colors(nums):\n zero_idx = one_idx = 0\n two_idx = len(nums) - 1\n while one_idx <= two_idx:\n if nums[one_idx] == 1:\n one_idx += 1\n elif nums[one_idx] == 0:\n nums[zero_idx], nums[one_idx] = nums[one_idx], nums[zero_idx]\n zero_idx += 1\n one_idx += 1\n else:\n nums[one_idx], nums[two_idx] = nums[two_idx], nums[one_idx]\n two_idx -= 1\n","repo_name":"CodersInSeattle/InterviewProblems","sub_path":"problems/sorting/sort_colors.py","file_name":"sort_colors.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"25212064924","text":"\"\"\"\nAvoid using default `language` field for text indexes\nas it would throw an error when saving document with\nunsupported language.\n\nSo instead we should define indexes with custom language\nfield (via `language_override`) and only set it when it's supported.\n\"\"\"\n\n_TEXT_MONGO_LANGUAGE = \"_mongo_language\"\n\n# https://docs.mongodb.com/manual/reference/text-search-languages/#text-search-languages\n_TEXT_SUPPORTED_LANGUAGES = {\n \"da\",\n \"nl\",\n \"en\",\n \"fi\",\n \"fr\",\n \"de\",\n \"hu\",\n \"it\",\n \"nb\",\n \"pt\",\n \"ro\",\n \"ru\",\n \"es\",\n \"sv\",\n \"tr\",\n}\n\nTEXT_INDEX_OPTIONS = {\n \"background\": True,\n \"language_override\": _TEXT_MONGO_LANGUAGE,\n}\n\n\ndef set_mongo_lang(doc):\n 
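# --- Illustrative sketch (not part of the original records) ---
# A quick trace of the Dutch-national-flag partition used by sort_colors
# above: one pass, three pointers, fully in place.
def dutch_flag_demo(nums):
    zero_idx = one_idx = 0
    two_idx = len(nums) - 1
    while one_idx <= two_idx:
        if nums[one_idx] == 1:          # white: already in the middle region
            one_idx += 1
        elif nums[one_idx] == 0:        # red: swap into the left region
            nums[zero_idx], nums[one_idx] = nums[one_idx], nums[zero_idx]
            zero_idx += 1
            one_idx += 1
        else:                           # blue: swap into the right region
            nums[one_idx], nums[two_idx] = nums[two_idx], nums[one_idx]
            two_idx -= 1
    return nums

print(dutch_flag_demo([2, 0, 2, 1, 1, 0]))  # [0, 0, 1, 1, 2, 2]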
\"\"\"Mongo only supports certain languages and won't story document with unsupported one.\"\"\"\n if doc.get(\"language\"):\n mongo_lang = get_mongo_language(doc[\"language\"])\n if mongo_lang:\n doc[_TEXT_MONGO_LANGUAGE] = mongo_lang\n\n\ndef get_mongo_language(lang):\n if not lang:\n return\n lang = lang.split(\"-\")[0].split(\"_\")[0]\n return lang if lang in _TEXT_SUPPORTED_LANGUAGES else None\n","repo_name":"superdesk/superdesk-core","sub_path":"superdesk/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"17892843838","text":"#Data size lenghts to test \nnums=[1000,500,100,50,25,10]\n\n#Library of ODE solutions \nt_lib=[np.linspace(0,2,num=val) for val in nums]\nsol_lib=[ODESolution(clib,kc) for clib in t_lib]\n\n#Library of simulated data \nnoises=[[np.random.uniform(low=-1,high=1)/20 for val in sol] for sol in sol_lib]\nsignal=[MakeNoisyData(sol,nos) for sol,nos in zip(sol_lib,noises)]\n\n#Parameter estimation an performance evaluation \nparams=[curve_fit(ODESolution,times,signals)[0][0] for times,signals in zip(t_lib,signal)] \nsolutions=[ODESolution(times,kS) for times,kS in zip(t_lib,params)]\n","repo_name":"TavoGLC/DataAnalysisByExample","sub_path":"ParameterEstimation/Fragments/ODEFiting01/Fragment03.py","file_name":"Fragment03.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"70125809546","text":"import sys\nimport datetime\n\nsys.path.insert(0, '/home/pi/Desktop/SED/RFIDoors/')\nsys.path.insert(0, '/home/pi/Desktop/SED/RFIDoors/administrador/rfidoors/src/server/')\n\n\nfrom scripts.rfid import rfidLectura;\nfrom scripts.rfid import rfidEscritura;\nfrom scripts.servo import movimientoServo;\nfrom scripts.uart import escrituraUart;\nfrom scripts.uart import lecturaUart;\nfrom scripts.ultrasonido import datosUltrasonido;\nfrom bbdd import consultarBBDD;\n\n\nmensaje = \"\"\nfechaValida = \"\"\n\nwhile (True):\n\tmensaje = lecturaUart.recibirDatoUART(30)\n\n\t##TIPOS DE MENSAJE\n\t# - Preguntar por una llava a la bbdd -> {id:llave}\n\t# - Responder sobre una llave -> {respuestaBBDD:si}\n\t# - Solicitar nueva acceso a bbdd -> {acceso:llave}\n\n\n\tif (mensaje[1:3] == \"id\"):\n\n\t\tprint(\"UART\")\n\n\t\texistencia = consultarBBDD.existeUsuario(mensaje[4:-1])\n\n\t\tprint(existencia)\n\n\t\tif(existencia >= 1):\n\t\t\tfechaValida = consultarBBDD.obtenerFechaValidez(mensaje[4:-1])\n\t\t\tprint(fechaValida)\n\n\t\t\tif (str(fechaValida[0]) > str(datetime.datetime.now())):\n\t\t\t\tprint(\"Existe con fecha valida\")\n\t\t\t\tmensaje = \"{respuestaBBDD:si}\"\n\t\t\telse:\n\t\t\t\tprint(\"existe con fecha no valida\")\n\t\t\t\tmensaje = \"{respuestaBBDD:no}\"\n\t\telse:\n\t\t\tprint(\"No existe\")\n\t\t\tmensaje = \"{respuestaBBDD:no}\"\n\n\t\tprint(\"Envio respuesta\")\n\t\tescrituraUart.mandarMensajeUART(mensaje)\n\n\tif (mensaje[1:7] == \"acceso\"):\n\t\tfecha = datetime.datetime.now()\n\t\tllave = mensaje[8:-1]\n\t\tconsultarBBDD.nuevoAccesoHistorial(llave, fecha)","repo_name":"ivanClass/SED","sub_path":"RFIDoors/administrador/consultarBBDD.py","file_name":"consultarBBDD.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25819717007","text":"# Optimization: Finds the FEWEST number of coins to return the change\ndef coinChange(coins, 
amount):\n dp = [amount + 1] * (amount + 1)\n dp[0] = 0\n for money in range(1, amount + 1):\n for coin in coins:\n if money - coin >= 0:\n dp[money] = min(1 + dp[money - coin], dp[money])\n return dp[amount] if dp[amount] != amount + 1 else -1\n\n\n# Combinatoric: Finds the total ways to return the change\ndef coinChangeWays(coins, amount):\n dp = [0] * (amount + 1)\n dp[0] = 1\n for money in range(1, amount + 1):\n for coin in coins:\n if money - coin >= 0:\n dp[money] += dp[money - coin]\n return dp[amount]\n\n\ncoins = [1, 2, 5]\namount = 11\nprint(coinChange([2], 3))\nprint(coinChange(coins, amount)) # Output: 3\nprint(coinChange(coins, 13)) # 4\n","repo_name":"miray-mustafov/LeetCode","sub_path":"11.Dynamic Programming/1D/322. Coin Change.py","file_name":"322. Coin Change.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29945195156","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import patches\nimport subprocess as sp\n\n\nclass Kohonen:\n def __init__(self, data, iterations=5000, learning_rate=0.01, normalize_data=True, normalize_by_column=False, network_x_dimension=40, network_y_dimension=40):\n self.iterations = iterations\n self.learning_rate = learning_rate\n self.normalize_data = normalize_data\n self.normalize_by_column = normalize_by_column\n self.network_dimensions = np.array([network_x_dimension, network_y_dimension])\n self.init_radius = max(self.network_dimensions[0], self.network_dimensions[1]) / 2\n self.time_constant = iterations / np.log(self.init_radius)\n self.raw_data = data\n self.data = None\n self.network = np.random.random((self.network_dimensions[0], self.network_dimensions[1], self.raw_data.shape[0]))\n\n def _find_bmu(self, t):\n \"\"\"\n to find best matching unit\n \"\"\"\n m = self.raw_data.shape[0]\n bmu_idx = np.array([0, 0])\n min_dist = np.iinfo(np.int).max\n\n for x in range(self.network.shape[0]):\n for y in range(self.network.shape[1]):\n w = self.network[x, y, :].reshape(m, 1)\n sq_dist = np.sum((w - t) ** 2)\n sq_dist = np.sqrt(sq_dist)\n if sq_dist < min_dist:\n min_dist = sq_dist # dist\n bmu_idx = np.array([x, y]) # id\n\n bmu = self.network[bmu_idx[0], bmu_idx[1], :].reshape(m, 1)\n return bmu, bmu_idx\n\n @staticmethod\n def decay_radius(init_radius, i, time_constant):\n return init_radius * np.exp(-i / time_constant)\n\n @staticmethod\n def decay_learning_rate(initial_learning_rate, i, n_iterations):\n return initial_learning_rate * np.exp(-i / n_iterations)\n\n @staticmethod\n def calculate_influence(distance, radius):\n return np.exp(-distance / (2 * (radius ** 2)))\n\n def show_percentage(self, i):\n sp.call('clear', shell=True)\n print(\"Learning in Progress: \" + str(i/self.iterations*100) + \"%\")\n\n def normalize(self):\n data = self.raw_data\n if self.normalize_data:\n if self.normalize_by_column:\n col_maxes = self.raw_data.max(axis=0)\n data = self.raw_data / col_maxes[np.newaxis, :]\n else:\n data = self.raw_data / data.max()\n self.data = data\n\n def train(self):\n for i in range(self.iterations + 1):\n self.show_percentage(i)\n # print(\"Iteration %d Completed\" % i)\n # t is reshaped data\n t = self.data[:, np.random.randint(0, self.raw_data.shape[1])].reshape(np.array([self.raw_data.shape[0], 1]))\n bmu, bmu_idx = self._find_bmu(t)\n # r is radius\n r = self.decay_radius(self.init_radius, i, self.time_constant)\n # l is new learning rate\n new_learning_rate = 
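# --- Illustrative sketch (not part of the original records) ---
# Tracing the bottom-up DP from coinChange above for coins [1, 2, 5]:
# dp[m] holds the fewest coins summing to m, seeded with the impossible
# sentinel amount + 1 so min() can overwrite it.
coins, amount = [1, 2, 5], 11
dp = [amount + 1] * (amount + 1)
dp[0] = 0
for money in range(1, amount + 1):
    for coin in coins:
        if money - coin >= 0:
            dp[money] = min(1 + dp[money - coin], dp[money])
print(dp)          # [0, 1, 1, 2, 2, 1, 2, 2, 3, 3, 2, 3]
print(dp[amount])  # 3, since 11 = 5 + 5 + 1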
self.decay_learning_rate(self.learning_rate, i, self.iterations)\n\n for x in range(self.network.shape[0]):\n for y in range(self.network.shape[1]):\n w = self.network[x, y, :].reshape(self.raw_data.shape[0], 1)\n w_dist = np.sum((np.array([x, y]) - bmu_idx) ** 2)\n w_dist = np.sqrt(w_dist)\n\n if w_dist <= r:\n influence = self.calculate_influence(w_dist, r)\n new_w = w + (new_learning_rate * influence * (t - w))\n self.network[x, y, :] = new_w.reshape(1, 3)\n\n def show(self, save=True):\n fig = plt.figure()\n\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_xlim((0, self.network.shape[0] + 1))\n ax.set_ylim((0, self.network.shape[1] + 1))\n ax.set_title('Kohonen after %d iterations' % self.iterations)\n\n # plot\n for x in range(1, self.network.shape[0] + 1):\n for y in range(1, self.network.shape[1] + 1):\n ax.add_patch(patches.Rectangle((x - 0.5, y - 0.5), 1, 1,\n facecolor=self.network[x - 1, y - 1, :],\n edgecolor='none'))\n if save:\n plt.savefig('Kohonen%d.png' % self.iterations)\n plt.show()\n\n\n\n\n","repo_name":"miladibra10/KohonenColorMapper","sub_path":"Kohonen.py","file_name":"Kohonen.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"733511381","text":"# PYTTSX3 is a text-to-speach connversion libraary in python\r\n# unlike alternative libraries, it works offline, and is compatible with both python 2 and python 3\r\n\r\nimport pyttsx3\r\nimport datetime \r\nimport speech_recognition as sr\r\nimport wikipedia\r\nimport webbrowser\r\nimport os\r\nimport smtplib\r\nimport requests \r\nimport json\r\nimport time\r\nimport pyautogui\r\nimport pymysql\r\nimport pygame as py\r\nimport random\r\nimport math\r\nfrom auigui import *\r\nfrom pred import *\r\n\r\nobj1 = gui()\r\nobj1.start()\r\n\r\nengine = pyttsx3.init('sapi5') # object creation, The Speech Application Programming Interface or SAPI is an API developed\r\n # by Microsoft to allow the use of speech recognition and speech synthesis within Windows applications.\r\n\r\nvoices = engine.getProperty(\"voices\") # getting deatils of current speaking voices #print(voices[0].id) It will all voices available in syste, We only have female voice Zira\r\nengine.setProperty('voice' , voices[1].id) # setting up a new voice voices[0]\r\n\r\ndef wish():\r\n hour = int(datetime.datetime.now().hour)\r\n if hour >= 0 and hour < 12:\r\n speak(\"Good Morning\")\r\n elif hour >= 12 and hour <= 16:\r\n speak(\"Good Afternoon\")\r\n else:\r\n speak(\"Good Evening\")\r\n speak(\"Nova at your service\")\r\n speak(\"How may i help you\")\r\n\r\ndef speak(audio):\r\n gui.wave = False\r\n gui.circle = True\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\ndef takecommand():\r\n '''\r\n It takes microphone input from user and returns string output and for converting speech to text it requires internet\r\n '''\r\n\r\n gui.wave = True\r\n gui.circle = False\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening....\")\r\n r.pause_threshold = 0.8 # kitna tej bole yaha se control hoga\r\n r.adjust_for_ambient_noise(source , duration=1)\r\n r.non_speaking_duration = 0.3\r\n audio = r.listen(source) # It listens what ever we say\r\n\r\n\r\n try:\r\n print(\"Recognizing....\")\r\n query = r.recognize_google(audio, language='en-in')\r\n print(\"User Said : \", query)\r\n\r\n except Exception as e:\r\n print(e)\r\n print(\"Say that Again Please....\")\r\n speak(\"Say that Again Please....\")\r\n return \"None\"\r\n return 
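# --- Illustrative sketch (not part of the original records) ---
# The Kohonen trainer above shrinks the neighbourhood radius (and the
# learning rate) exponentially; a quick look at the radius schedule and
# the Gaussian influence it implies at distance 1 from the best matching unit:
import numpy as np

init_radius, iterations = 20.0, 5000
time_constant = iterations / np.log(init_radius)     # as in Kohonen.__init__
for i in (0, 1000, 2500, 5000):
    r = init_radius * np.exp(-i / time_constant)     # decay_radius
    influence = np.exp(-1.0 / (2 * r ** 2))          # calculate_influence at distance 1
    print(i, round(r, 2), round(float(influence), 4))
# the radius falls from 20 down to 1, so late updates only move the BMU's
# immediate neighbourhood.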
query\r\n\r\ndef newsfromBBC():\r\n # BBC news api\r\n main_url = \"https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=a05cf53c56bf4c0882db52aed506e3bc\"\r\n\r\n # Fetching Data in Json format\r\n open_page = requests.get(main_url).json()\r\n\r\n # Getting all article in a string article \r\n article = open_page[\"articles\"]\r\n\r\n # Empty list which will contain all ternding news\r\n results = []\r\n\r\n for ar in article:\r\n results.append(ar[\"title\"])\r\n speak(\"The headlines are\")\r\n\r\n for i in range(len(results)):\r\n\r\n # Print all trending news\r\n print(i+1, results[i])\r\n speak(results[i])\r\nt = time.localtime()\r\ntimestamp = time.strftime('%b-%d-%Y_%H%M', t).lower()\r\nfilename = 'Screenshot' + timestamp + '.jpeg'\r\n\r\ndef imgsearch(text):\r\n urlp = \"https://www.google.com/search?hl=en&tbm=isch&source=hp&biw=1745&bih=861&ei=bVgwXcSvKMzUvASsvr24BQ&q=\"+text\r\n urlm = urlp+\"&oq=\"+text+\"&gs_l=img.3..0l10.6121.7120..7308...0.0..0.242.1136.2j5j1......0....1..gws-wiz-img.....0..35i39.IvSjTlBizUM&ved=0ahUKEwiEmr2Kr77jAhVMKo8KHSxfD1cQ4dUDCAU&uact=5\"\r\n webbrowser.open(urlm)\r\n\r\ndef say_joke():\r\n # main_url = \"http://api.icndb.com/jokes/random\"\r\n main_url = \"https://api.chucknorris.io/jokes/random\" # \"https://hindi-jokes-api.onrender.com\"\r\n open_page = requests.get(main_url).json()\r\n # joke = open_page[\"value\"][\"joke\"]\r\n joke = open_page[\"value\"]\r\n print(f\"Joke : {joke}\")\r\n speak(joke)\r\n\r\ndef capture():\r\n # Takes screenshot\r\n img = pyautogui.screenshot()\r\n\r\n # Save the image\r\n img.save(\"D:\\\\Nova Voice assistant\\\\ScreenShot\\\\\" + filename)\r\n\r\n # Show the image\r\n # img.show()\r\n \r\ndef weather(text):\r\n \r\n try:\r\n api_key_weather = \"23917b44d9e7de5e3fe0876778777519\"\r\n main_url = \"http://api.openweathermap.org/data/2.5/weather?q=\"+text+\",in&appid=\"+api_key_weather\r\n open_page = requests.get(main_url).json()\r\n\r\n condition = open_page['weather'][0]['description'] # weather condition in string\r\n temp = str(round(open_page['main']['temp'] - 273.165)) + 'degree celsius' # temp in C\r\n pressure = str(open_page['main']['pressure']) + 'mili Bar' # pressure in mBar\r\n humidity = str(open_page['main']['humidity']) + 'percent' # humidity in percentage %\r\n wind_speed = str(open_page['wind']['speed']) + 'meter per second' # wind speed in m/s\r\n wind_angle = str(open_page['wind']['deg']) + 'degree' # angle of wind\r\n\r\n print(f\"Weather at {text.capitalize()} is as follows\")\r\n speak(f\"Weather at {text} is as follows\")\r\n\r\n print(f\"Condition {condition}\")\r\n speak(f\"condition {condition}\")\r\n\r\n print(f\"Temperature {temp}\")\r\n speak(f\"Temperature {temp}\")\r\n\r\n print(f\"Pressure {pressure}\")\r\n speak(f\"Pressure {pressure}\")\r\n\r\n print(f\"Humidity {humidity}\")\r\n speak(f\"Humidity {humidity}\")\r\n\r\n print(f\"Wind Speed {wind_speed}\")\r\n speak(f\"Wind Speed {wind_speed}\")\r\n\r\n print(f\"At an Angle of {wind_angle}\")\r\n speak(f\"At an Angle of {wind_angle}\")\r\n\r\n database_weather(condition, temp, pressure, humidity, wind_speed, wind_angle, text)\r\n\r\n\r\n\r\n except Exception as e:\r\n print(\"Sorry! 
Connection Failed - Please try again \", e)\r\n speak(\"Sorry \")\r\n speak(\"connection Failed Please try again\")\r\n\r\n\r\ndef database_weather(condition, temp, pressure, humidity, wind_speed, wind_angle, place):\r\n # print(\"db wether\")\r\n # return\r\n\r\n try:\r\n\r\n conn = pymysql.connect(host=\"127.0.0.1\", user=\"root\", passwd='', db='my_python') # creates connection object\r\n mycursor = conn.cursor() # It will allow to fire SQL Query\r\n\r\n date = datetime.datetime.now()\r\n date = str(date)\r\n\r\n url2 = \"INSERT INTO weather (conditions, temperature, pressure, humidity, speed, angle, datetime, place) VALUES('\"+condition+\"','\"+temp+\"','\"+pressure+\"','\"+humidity+\"','\"+wind_speed+\"','\"+wind_angle+\"','\"+date+\"','\"+place+\"')\"\r\n mycursor.execute(url2)\r\n print()\r\n # Fires Query\r\n\r\n conn.commit() # Save changes in Mysql\r\n except Exception as e:\r\n print(e)\r\n finally:\r\n conn.close()\r\n\r\ndef database(name, age, gender, hobbies, qualification, favfood):\r\n # print(\"db\")\r\n # return\r\n\r\n try:\r\n conn = pymysql.connect(host=\"127.0.0.1\", user=\"root\", passwd='', db='my_python') # creates connection object\r\n\r\n mycursor = conn.cursor() # It will allow to fire SQL Query\r\n\r\n url = \"INSERT INTO information (Name, Age, Gender, Hobbies, Qualification, Favfood) VALUES('\"+name+\"','\"+age+\"','\"+gender+\"','\"+hobbies+\"','\"+qualification+\"','\"+favfood+\"')\"\r\n mycursor.execute(url)\r\n print()\r\n # Fires Query\r\n\r\n conn.commit() # Save changes in Mysql\r\n except Exception as e:\r\n print(e)\r\n finally:\r\n speak(\"Thankyou for the co-operation\")\r\n conn.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n wish()\r\n\r\n while True:\r\n query = takecommand().lower() # wee are converting the query in lower while searching any word we pass it in lower case\r\n #Logic to execute tast based query\r\n if 'yourself' in query:\r\n Yourself()\r\n elif 'what can you do' in query:\r\n speak(\"Sure sir I would be happy shairing details about me with you\")\r\n what_can_you_do()\r\n elif 'headlines' in query:\r\n newsfromBBC()\r\n \r\n elif 'jokes' in query:\r\n speak(\"Get Ready you won't be able to control your laughter\")\r\n say_joke()\r\n say_joke()\r\n say_joke()\r\n say_joke()\r\n say_joke()\r\n\r\n\r\n elif'search' in query:\r\n speak(\"Sure sir tell what do you want to search\")\r\n img = takecommand()\r\n imgsearch(img) \r\n \r\n elif 'surfing' in query:\r\n speak(\"Sure but please tell me want you want to search\")\r\n text = takecommand().lower()\r\n if \"youtube\" in text:\r\n text = text.replace('search', \"\")\r\n text = text.replace(\"youtube\", \"\")\r\n text = text.split(\" \")\r\n text = \"+\".join(text[4:])\r\n url = 'https://www.youtube.com/results?search_query='\r\n url = url + text\r\n print(url)\r\n webbrowser.open(url)\r\n\r\n elif \"search\" in text:\r\n text = text.replace('search', \"\").split(\" \")\r\n text = \"+\".join(text[1:])\r\n urlp = 'https://www.google.com/search?source=hp&ei=txgvXfO1Cov_vAT_9oq4Dg&q='\r\n urlm = '&oq='\r\n urls = '&gs_l=psy-ab.3..0j0i22i30l9.1636.9226..9363...2.0..0.395.4581.0j11j5j4....2..0....1..gws-wiz.....10..35i39j0i67j0i131j0i20i263.GsXvyMxzL-0'\r\n url = urlp + text + urlm + text + urls\r\n print(url)\r\n webbrowser.open(url)\r\n\r\n elif 'play music' in query:\r\n speak(\"Be ready to rock and roll\")\r\n music_dir = 'D:\\\\Song'\r\n songs = os.listdir(music_dir) # It will list all the songs in our music directory\r\n print(songs)\r\n speak(\"Dj play music\")\r\n 
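# --- Illustrative sketch (not part of the original records) ---
# database()/database_weather() above build their INSERT statements by string
# concatenation, which breaks on quotes in the data and invites SQL injection.
# pymysql supports parameter placeholders; a safer variant of the same insert
# (table and column names taken from the record, connection details assumed):
import pymysql

def insert_information(name, age, gender, hobbies, qualification, favfood):
    conn = pymysql.connect(host="127.0.0.1", user="root", passwd="", db="my_python")
    try:
        with conn.cursor() as cur:
            cur.execute(
                "INSERT INTO information (Name, Age, Gender, Hobbies, Qualification, Favfood) "
                "VALUES (%s, %s, %s, %s, %s, %s)",
                (name, age, gender, hobbies, qualification, favfood),
            )
        conn.commit()  # persist the row
    finally:
        conn.close()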
os.startfile(os.path.join(music_dir, songs[0])) # It will play the first song we can use random number technique to play any random song\r\n \r\n elif 'party' in query:\r\n speak(\"Be ready to rock and roll\")\r\n music_dir = 'D:\\\\EDM'\r\n songs = os.listdir(music_dir) # It will list all the songs in our music directory\r\n print(songs)\r\n speak(\"Dj play music\")\r\n os.startfile(os.path.join(music_dir, songs[0])) # It will play the first song we can use random number technique to play any random song\r\n \r\n elif 'Screenshot' in query:\r\n capture()\r\n print(\"Sir your image has been captured successfully\")\r\n speak(\"Sir your image has been captured successfully\")\r\n speak(\"Do you want to see it\")\r\n query = takecommand()\r\n if 'yes' in query:\r\n path = \"D:\\\\Nova Voice assistant\\\\ScreenShot\\\\\" + filename\r\n os.startfile(path)\r\n \r\n elif 'open stack overflow' in query:\r\n webbrowser.open(\"stackoverflow.com\")\r\n \r\n elif 'the time' in query:\r\n strtime = datetime.datetime.now().strftime(\"%H:%M:%S\") # Gives the current time in given format in form of a string\r\n speak(f\"Sir,the time is {strtime}\")\r\n \r\n elif 'C drive' in query:\r\n path = \"C:\\\\\"\r\n os.startfile(path)\r\n \r\n elif 'D drive' in query:\r\n path = \"D:\\\\\"\r\n os.startfile(path)\r\n\r\n elif 'weather' in query:\r\n speak(\"Tell me the place whose weather report you want\")\r\n place = takecommand()\r\n weather(place)\r\n\r\n elif 'my friend' in query:\r\n speak(\"Welcome friend How Are you?\")\r\n speak(\"Could you help me with your name please...\")\r\n name = 'None'\r\n age = 'None'\r\n gender = 'None'\r\n hobbies = 'None'\r\n qualification = 'None'\r\n favfood = 'None'\r\n\r\n speak(\"What is Your Name\")\r\n print(\"Name\")\r\n while name == 'None':\r\n name = takecommand()\r\n\r\n\r\n speak(\"I also keen to know you gender...\")\r\n while gender == 'None':\r\n gender = takecommand()\r\n\r\n speak('What is your age')\r\n while age == 'None':\r\n age = takecommand()\r\n\r\n\r\n speak(\"I am also interested in knowing your hobbies\")\r\n while hobbies == 'None':\r\n hobbies = takecommand()\r\n\r\n speak(\"Can you tell me something about your Qualifications..\")\r\n while qualification == 'None':\r\n qualification = takecommand()\r\n\r\n speak(\"Lastly tell me something which food item you love to eat\")\r\n while favfood == 'None':\r\n favfood = takecommand()\r\n print(name, age, gender, hobbies, qualification, favfood)\r\n\r\n database(name, age, gender, hobbies, qualification, favfood)\r\n\r\n\r\n elif 'exit' in query:\r\n exit()\r\n","repo_name":"RohanBiturwar/Nova_VoiceAssistant","sub_path":"Nova_Voice_Assistant.py","file_name":"Nova_Voice_Assistant.py","file_ext":"py","file_size_in_byte":12819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41110368342","text":"import logging\nimport os\nimport json\nimport fosslight_util.constant as constant\n\nlogger = logging.getLogger(constant.LOGGER_NAME)\nEMPTY_FILE_PATH = '-'\n\n\ndef write_scancodejson(output_dir, output_filename, oss_list):\n json_output = {}\n json_output['headers'] = []\n json_output['summary'] = {}\n json_output['license_detections'] = []\n json_output['files'] = []\n\n for oi in oss_list:\n if oi.exclude:\n continue\n if not oi.source_name_or_path:\n oi.source_name_or_path = EMPTY_FILE_PATH\n for item_path in oi.source_name_or_path:\n filtered = next(filter(lambda x: x['path'] == item_path, json_output['files']), None)\n if filtered:\n 
append_oss_item_in_filesitem(oi, filtered)\n else:\n json_output['files'] = add_item_in_files(oi, item_path, json_output['files'])\n with open(os.path.join(output_dir, output_filename), 'w') as f:\n json.dump(json_output, f, sort_keys=False, indent=4)\n\n\ndef append_oss_item_in_filesitem(item, files_item):\n if item.is_binary:\n files_item['is_binary'] = item.is_binary\n if item.name or item.version or item.license or item.copyright or item.download_location or item.comment:\n oss_item = {}\n oss_item['name'] = item.name\n oss_item['version'] = item.version\n oss_item['license'] = item.license\n oss_item['copyright'] = item.copyright\n oss_item['download_location'] = item.download_location\n oss_item['comment'] = item.comment\n files_item['oss'].append(oss_item)\n return files_item\n\n\ndef add_item_in_files(item, item_path, files_list):\n files_item = {}\n files_item['path'] = item_path\n files_item['name'] = os.path.basename(item_path)\n files_item['is_binary'] = item.is_binary\n files_item['base_name'], files_item['extension'] = os.path.splitext(os.path.basename(item_path))\n files_item['oss'] = []\n files_item = append_oss_item_in_filesitem(item, files_item)\n files_list.append(files_item)\n\n return files_list\n","repo_name":"fosslight/fosslight_util","sub_path":"src/fosslight_util/write_scancodejson.py","file_name":"write_scancodejson.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"73227730826","text":"\"\"\"\nGiven two strings A and B, find the minimum number of times A has to be repeated such that B is a substring of it. If no such solution, return -1.\n\nFor example, with A = \"abcd\" and B = \"cdabcdab\".\n\nReturn 3, because by repeating A three times (“abcdabcdabcd”), B is a substring of it; and B is not a substring of A repeated two times (\"abcdabcd\").\n\nNote:\nThe length of A and B will be between 1 and 10000.\n\"\"\"\n\n\"\"\"\nBrute Force: Keep a count of how many times you do A+A and every time check if B in A+A. Time complexity: Joining strings = O(m+m) so in this case O(m) but then\nchecking if B is in A+A is O(m) so total is O(m^2)\n\"\"\"\n\nclass Solution:\n def repeatedStringMatch(self, A: str, B: str) -> int:\n \n new_str = \"\"\n total_transforms = 0\n \n while len(new_str) < len(B): \n new_str += A \n total_transforms += 1\n if B in new_str:\n return total_transforms\n \n\n new_str += A\n\n if B in new_str:\n return total_transforms + 1\n else:\n return -1\n","repo_name":"christian-miljkovic/interview","sub_path":"Leetcode/Algorithms/Easy/Strings/RepeatedStringMatch.py","file_name":"RepeatedStringMatch.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21093825479","text":"import os, sys, discord\nimport re\n\nfrom urllib import parse, request\nfrom discord.ext import commands\nfrom discord.ext.commands import Cog, command\n\n# Only if you want to use variables that are in the config.py file.\nif not os.path.isfile(\"config.py\"):\n sys.exit(\"'config.py' not found! 
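# --- Illustrative sketch (not part of the original records) ---
# The bound used implicitly by repeatedStringMatch above: once the repeated
# string is at least len(B) long, at most one extra copy of A can still help,
# so ceil(len(B) / len(A)) copies plus one bound the answer.
A, B = "abcd", "cdabcdab"
copies = -(-len(B) // len(A))              # ceil division: 2 copies reach len(B)
print(copies, B in A * copies)             # 2 False -> not yet a substring
print(copies + 1, B in A * (copies + 1))   # 3 True  -> the answer is 3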
Please add it and try again.\")\nelse:\n import config\n\n# Here we name the cog and create a new class for the cog.\nclass Youtube(commands.Cog, name=\"Youtube\"):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"yt\")\n async def yt(self, ctx, *, search): #get args and search videos on youtube\n global searchResults\n queryString= parse.urlencode({'search_query': search})\n htmlContent= request.urlopen('http://www.youtube.com/results?'+ queryString)\n searchResults=re.findall( r\"watch\\?v=(\\S{11})\", htmlContent.read().decode())#get videos id\n\n await ctx.send(f\"We have found {len(searchResults)} results.\\n\\n Result:\\nhttp://www.youtube.com/watch?v={searchResults[0]}\") #get and send the first video\n\n \"\"\"@command(name=\"nextyt\")\n async def nextyt(ctx, option:int):\n leng = len(searchResults)\n if leng != 0:\n await ctx.send(f\"http://www.youtube.com/watch?v={searchResults[option]}\")\n else:\n await ctx.send(\"You haven't searched for any videos!!\")\"\"\"\n\ndef setup(bot):\n bot.add_cog(Youtube(bot))\n\n\n\"\"\"\"\n class Youtube(Cog):\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\n @command(name=\"yt\")\n async def yt\n \n command(name=\"yt\")\n async def yt(ctx, *, search): #get args and search videos on youtube\n global searchResults\n queryString= parse.urlencode({'search_query': search}) #take the args from the user and parse that to a url\n htmlContent= request.urlopen('http://www.youtube.com/results?'+ queryString)\n searchResults=re.findall( r\"watch\\?v=(\\S{11})\", htmlContent.read().decode())#get videos id\n \n await ctx.send(f\"Se han encontrado {len(searchResults)} resultados.\\n\\nPrimer Resultado:\\nhttp://www.youtube.com/watch?v={searchResults[0]}\") #get and send the first video\n\n @command(name=\"nextyt\")\n async def nextyt(ctx, option:int):\n leng = len(searchResults)\n if leng != 0:\n await ctx.send(f\"http://www.youtube.com/watch?v={searchResults[option]}\")\n else:\n await ctx.send(\"No has buscado videos!! 
para ver los comandos utiliza: -help\n \ndef setup(bot):\n bot.add_cog(Template(bot))\"\"\"\n","repo_name":"ryanegenlangston1993/hrtrn","sub_path":"cogs/yt.py","file_name":"yt.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26819264436","text":"class Range_iterator(object):\r\n def __init__(self, iterable, start=0, end=None, step=1):\r\n self.iterable = iterable\r\n self.idx = start\r\n if end is None:\r\n end = len(self.iterable)\r\n elif end > len(self.iterable):\r\n raise KeyError(\"End must be less or equal than length of iterable\")\r\n self.end = end\r\n self.step = step\r\n\r\n def __iter__(self):\r\n return self\r\n\r\n def __next__(self):\r\n if self.idx == self.end:\r\n raise StopIteration\r\n value = self.iterable[self.idx]\r\n self.idx += self.step\r\n return value\r\n\r\n\r\nif __name__ == \"__main__\":\r\n l = [i for i in range(15)]\r\n t = (\"a\", \"b\", \"c\", \"d\", \"e\")\r\n print(l)\r\n for i in Range_iterator(l, start=3, end=15, step=2):\r\n print(i)\r\n print(t)\r\n for i in Range_iterator(t):\r\n print(i)","repo_name":"220vma/HW17","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5914771491","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport datetime\nimport ast\nimport fire\n\nimport pandas as pd\nimport pymsteams\n\nWEBHOOK_URL = os.environ[\"WEBHOOK_URL\"]\n\ndef main(building=\"west\", target_date=None):\n\n if not target_date:\n today = datetime.datetime.now()\n else:\n today = datetime.datetime.strptime(target_date, \"%Y-%m-%d\")\n \n monday = today - datetime.timedelta(days=today.weekday())\n\n building_kor = \"서관\" if building==\"west\" else \"동관\"\n\n menu_df = pd.read_csv(f\"./storage/twin_menu_{building}_{monday.strftime('%Y%m%d')}.csv\", index_col=\"Unnamed: 0\")\n\n menu_date = pd.Index([md[1] for md in menu_df.columns.str.split(\"_\")])\n today_menu = menu_df.loc[:, menu_date == today.strftime(\"%Y%m%d\")]\n weekday_kor = [\"월\", \"화\", \"수\", \"목\", \"금\", \"토\", \"일\"]\n\n myTeamsMessage = pymsteams.connectorcard(WEBHOOK_URL)\n\n if today_menu.empty:\n myTeamsMessage.text(f\"**{today.strftime('%Y-%m-%d')} ({weekday_kor[today.weekday()]})** : 오늘의 메뉴를 불러올 수 없습니다.\")\n\n else:\n # create the section\n myMessageSection = pymsteams.cardsection()\n\n # Section Title\n myMessageSection.title(f\"**{today.strftime('%Y-%m-%d')} ({weekday_kor[today.weekday()]})**\")\n\n # Activity Elements\n myMessageSection.activityTitle(f\"오늘의 메뉴 ({building_kor})\")\n if building == \"west\":\n myMessageSection.activityImage(\"https://img.icons8.com/office/452/west.png\")\n else:\n myMessageSection.activityImage(\"https://img.icons8.com/office/452/east.png\")\n\n # Facts are key value pairs displayed in a list.\n for section_name in today_menu.index:\n\n menu_list = ast.literal_eval(today_menu.loc[section_name, :][0])\n\n menu_string = \"\"\" \"\"\"\n for menu in menu_list:\n \n menu_string += \"
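# --- Illustrative sketch (not part of the original records) ---
# The yt cog above scrapes 11-character video ids out of the YouTube results
# page with the pattern watch\?v=(\S{11}); the same idea on a canned snippet:
import re

html = '<a href="/watch?v=dQw4w9WgXcQ">one</a> <a href="/watch?v=abcdefghijk">two</a>'
ids = re.findall(r"watch\?v=(\S{11})", html)
print(ids)  # ['dQw4w9WgXcQ', 'abcdefghijk']
print("http://www.youtube.com/watch?v=" + ids[0])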
    \" + menu + \"
    \"\n menu_string += \"\"\"\\n\\n\"\"\"\n\n myMessageSection.addFact(section_name, menu_string)\n myMessageSection.addFact(\"\", \"---\")\n\n # Add your section to the connector card object before sending\n myTeamsMessage.addSection(myMessageSection)\n myTeamsMessage.summary(\"Test Message\")\n\n myTeamsMessage.send()\n\nif __name__==\"__main__\":\n fire.Fire()","repo_name":"KeunhoLee/twin_menu_scrap","sub_path":"send_menu.py","file_name":"send_menu.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8165720449","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport json\nTOKEN_DADATA = \"d6c4e6cc7f93ad1ff6b8e8c76528d501135eb7ec\"\nURL_DADATA = \"https://suggestions.dadata.ru/suggestions/api/4_1/rs/findById/party\"\n\n#for telegram bot\nTOKEN_BOT = '1988666354:AAEN2cxASigBH-fBmglNBaFzY1Io1tkuV1k'\n\n#for api leasing-trade\nTOKEN_LT = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjVkYjE1Y2U2MjQwZGRkMDhjYWUwZmIxMiIsImlhdCI6MTYyOTQ0OTA0OX0.7VggX0mQFEAC2Y7uyRDhn33_FoVr1iEf4SevWUWqyFI'\nURL_LT = 'https://api2.leasing-trade.ru:4000/graphql'\n\n#for api damia\nTOKEN_DM = '8b67f61866498ce93c36257099f2f44de00b37b4'\nURL_DM = 'https://api.damia.ru/rs/balance'\nYEAR_DM = '2020'\nCODE_DM = '2110'\nmessage = '1655096633'\n\n\ndef get_info_DM(message):\n try:\n param_request = {'inn': message, 'key': TOKEN_DM} \n response = requests.get(URL_DM, params=param_request)\n z = response.status_code\n if z == '200':\n print(\"Не удалось получить ответ от сервера.\")\n else:\n data = json.loads(response.text)\n DM=[]\n DM.append(data[message][YEAR_DM][CODE_DM])\n x = DM[0] \n print(\"Выручка за 2020 год: \" + str(x) + \" тыс.руб.\")\n except Exception as e:\n\n print(e)\n\n\nget_info_DM(message)\n","repo_name":"Snipe87/tb","sub_path":"tg_bot/Bot2.py","file_name":"Bot2.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25230945868","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\n\n\ntitanic_df = pd.read_csv(\"./datasets/titanic_mine.csv\")\ntitanic_df.head()\n\n\nX = titanic_df.drop(\"Survived\", axis = 1)\nY = titanic_df.Survived\n\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2)\n\n\n# helper function that prints metrics score based on prediction and actual target passed\ndef summarize_classification(y_test, y_pred):\n acc = accuracy_score(y_test, y_pred, normalize = True) # since norm = True, acc in term of fraction\n num_acc = accuracy_score(y_test, y_pred, normalize = False)\n \n prec = precision_score(y_test, y_pred)\n recall = recall_score(y_test, y_pred)\n \n print(f\"accuracy count: {num_acc}\")\n print(f\"accuracy_score: {acc}\")\n print(f\"precision_score: {prec}\")\n print(f\"recall score: {recall}\")\n print()\n\n\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid = {\"max_depth\": [2, 4, 5, 7, 9, 10]}\n\ngrid_search = GridSearchCV(DecisionTreeClassifier(), param_grid, cv = 3, return_train_score=True)\ngrid_search.fit(x_train, y_train)\n\ngrid_search.best_params_\n\n\n# creates a de tree model using best max_depth ret by grid_search\ndecision_tree_model = DecisionTreeClassifier(\n max_depth = 
grid_search.best_params_[\"max_depth\"]\n).fit(x_train, y_train)\n\n\ny_pred = decision_tree_model.predict(x_test)\n\n# use helper function to get metrics for this model\nsummarize_classification(y_test, y_pred)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Neo-glitch/building-classfication-models-sklearn","sub_path":".virtual_documents/HyperParamter_tuning.ipynb.py","file_name":"HyperParamter_tuning.ipynb.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17445553244","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# $Id$\n\"\"\"\nSome useful functions around types, imitating the PHP style type checking\nfunctions. Of course, adapted to the types available in Python.\n\"\"\"\n\n#====================================================[ Imports and Presets ]===\nfrom types import *\n\n#==============================================================[ Own Types ]===\nclass nullDict(dict):\n \"\"\" A dictionary without KeyErrors (returning None instead) \"\"\"\n def __missing__(self,key):\n return None\n\n#==============================================================[ Functions ]===\n#----------------------------------------------------------[ Type checking ]---\n\"\"\"\nType checking functions have been taken from:\nhttp://code.activestate.com/recipes/305888-a-way-to-deal-with-checking-for-types/\n\"\"\"\ndef check_type(obj,atts=[],callables=[]):\n \"\"\"\n Helper for is_mapping(), is_list(), is_str() and is_file()\n @param object object to check\n @param optional list atts attributes the object must have (default: empty list)\n @param optional list callables callables the object must have (default: empty list)\n \"\"\"\n got_atts=True\n for att in atts:\n if not hasattr(obj,att):\n got_atts=False;break\n got_callables=True\n for call in callables:\n if not hasattr(obj,call):\n got_callables=False;break\n the_attr=getattr(obj,call)\n if not callable(the_attr):\n got_callables=False;break\n if got_atts and got_callables: return -1\n return 0\n\ndef is_iter(obj):\n \"\"\"\n Check whether the object is iterable\n @param object object to check\n @return int 1 if True, 0 if False, -1 if iterable but neither list, tuple, dict or file\n \"\"\"\n if isinstance(obj,ListType): return 1\n if isinstance(obj,TupleType): return 1\n if isinstance(obj,DictType): return 1\n if isinstance(obj,FileType): return 1\n if hasattr(obj,'__iter__') : return -1\n return 0\n\ndef is_gen(obj):\n \"\"\"\n Is the object a generator?\n @param object object to check\n @return int 1 if True, 0 if False\n \"\"\"\n if isinstance(obj,GeneratorType): return 1\n return 0\n\ndef is_seq(obj):\n \"\"\"\n Is the object a sequence?\n @param object object to check\n @return int 1 if True, 0 if False, -1 if obj[0:0] works but it's neither list nor tuple (but e.g. str)\n \"\"\"\n if isinstance(obj,ListType): return 1\n if isinstance(obj,TupleType): return 1\n if is_iter(obj):\n try: \n obj[0:0]\n return -1\n except TypeError:\n pass\n return 0 \n \ndef is_mapping(obj):\n \"\"\"\n Is the object a mapping type (e.g. 
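# --- Illustrative sketch (not part of the original records) ---
# The tuning step above keys everything off grid_search.best_params_; the
# same flow on synthetic data, so it runs without the titanic CSV:
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, random_state=0)
grid = GridSearchCV(DecisionTreeClassifier(random_state=0),
                    {"max_depth": [2, 4, 6]}, cv=3)
grid.fit(X, y)
print(grid.best_params_)  # e.g. {'max_depth': 2} -- depends on the data
best_model = DecisionTreeClassifier(
    max_depth=grid.best_params_["max_depth"]
).fit(X, y)
print(best_model.get_depth() <= grid.best_params_["max_depth"])  # True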
dictionary)?\n @param object object to check\n @return int 1 if True, 0 if False, -1 if it's not a dict but has callables\n \"\"\"\n if isinstance(obj,DictType): return 1\n if is_iter(obj):\n return check_type(obj,callables=['iteritems','has_key'])\n return 0\n\ndef is_dict(obj):\n \"\"\"\n Is it a dictionary?\n @param object object to check\n @return int 1 if True, 0 if False, -1 if it's not a dict but has callables\n \"\"\"\n return is_mapping(obj)\n\ndef is_list(obj):\n \"\"\"\n Is the object a list?\n @param object object to check\n @return int 1 if True, 0 if False, -1 if it's not a list, but has callables append, extend, and pop\n \"\"\"\n if isinstance(obj,ListType): return 1\n if is_seq(obj):\n if check_type(obj,callables=['append','extend','pop']): return -1\n return 0\n\ndef is_str(obj):\n \"\"\"\n Is the object a string?\n @param object object to check\n @return int 1 if True, 0 if False, -1 not str but has callables index, count, and replace\n \"\"\"\n if isinstance(obj, basestring): return 1\n if is_iter(obj):\n if check_type(obj,callables=['index','count','replace']): return -1\n return 0\n\ndef is_string(obj):\n \"\"\" alias for is_str \"\"\"\n return is_str(obj)\n\ndef is_int(obj):\n \"\"\"\n Is it an integer?\n @param object object to check\n @return int 1 if True, 0 if False, -1 not str but has callables index, count, and replace\n \"\"\"\n if isinstance(obj, int) : return 1\n return 0\n\ndef is_numeric(obj):\n \"\"\"\n Is it a number - i.e. an integer or float?\n @param object object to check\n @return int 1 if True, 0 if False, -1 not str but has callables index, count, and replace\n \"\"\"\n try:\n float(obj)\n return 1\n except ValueError:\n pass\n return 0\n\ndef is_file(obj):\n \"\"\"\n Is the object a file?\n @param object object to check\n @return int 1 if True, 0 if False, -1 if it's not FileType but has callables read and close\n \"\"\"\n if isinstance(obj,FileType): return 1\n if check_type(obj,callables=['read','close']): return -1\n return 0\n\ndef is_what(obj):\n \"\"\"\n Get the type of the passed object\n @param object object to check\n @return mixed string category (if we have a direct match) or list of 0/1 [iter,gen,seq,list,str,dict,file]\n \"\"\"\n try:\n if obj.__class__.__name__: return obj.__class__.__name__\n except:\n return [ str(i) for i in (is_iter(obj),is_gen(obj),is_seq(obj),is_list(obj),is_str(obj),is_mapping(obj),is_file(obj))]\n\n","repo_name":"IzzySoft/HyperSQL","sub_path":"lib/iz_tools/typecheck.py","file_name":"typecheck.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"1981055461","text":"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# plt.style.use('seaborn-darkgrid')\r\n# plt.figure(figsize=(7,7))\r\n\r\n########################################################################\r\n\r\nfont1 = {'family' : 'Times New Roman', 'weight' : 'normal', 'size' : 22}\r\nfont2 = {'family' : 'Times New Roman', 'weight' : 'normal', 'size' : 22}\r\n\r\n\r\nfile_name1 = 'lend_cifar10_ccn_0.4_K8_256_beta0.9_1'\r\nfile_name2 = 'lend_cifar10_ccn_0.4_K8_256_beta0.8_1'\r\n# file_name3 = 'lend_cifar100_rcn_0.6_K3_64_beta0.9_'\r\npath1 = 'results1011/' + str(file_name1) + '.txt'\r\npath2 = 'results1011/' + str(file_name2) + '.txt' \r\n# path3 = 'results1/' + str(file_name3) + '.txt'\r\nacc1 = [] \r\nacc2 = []\r\nacc3 = []\r\n\r\nmyfile1 = open(path1)\r\nfor line in myfile1.readlines():\r\n if 'valid' in line:\r\n temp = 
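# --- Illustrative sketch (not part of the original records) ---
# The PHP-style checkers above return 1 (exact type), -1 (duck-typed match)
# or 0 (no match); the same three-way convention in miniature:
def is_listish(obj):
    if isinstance(obj, list):
        return 1
    if all(hasattr(obj, c) for c in ("append", "extend", "pop")):
        return -1  # quacks like a list
    return 0

class MyList:
    def append(self, x): pass
    def extend(self, xs): pass
    def pop(self): pass

print(is_listish([1, 2]))    # 1
print(is_listish(MyList()))  # -1
print(is_listish("abc"))     # 0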
line.split()\r\n # print(temp)\r\n acc1.append(float(temp[1]))\r\n if len(acc1)>199:\r\n break\r\n \r\nmyfile2 = open(path2)\r\nfor line in myfile2.readlines():\r\n if 'valid' in line:\r\n temp = line.split()\r\n acc2.append(float(temp[1]))\r\n if len(acc2)>199:\r\n break\r\n\r\n# myfile3 = open(path3)\r\n# for line in myfile3.readlines():\r\n# if 'valid' in line:\r\n# temp = line.split()\r\n# # print(temp)\r\n# acc3.append(float(temp[1]))\r\n# if len(acc3)>199:\r\n# break\r\n\r\nacc1 = np.array(acc1)\r\nacc2= np.array(acc2)\r\n# acc3= np.array(acc3)\r\nx = np.linspace(1,acc1.shape[0],acc1.shape[0])\r\nx = x-1\r\nplt.style.use('seaborn-darkgrid')\r\nplt.figure(figsize=(7,7))\r\nf1, = plt.plot(x, acc1, c='red', linewidth=2, label = 'none')\r\nf2, = plt.plot(x, acc2, c='blue', linewidth=2, label = 'sharp')\r\n# f3, = plt.plot(x, acc3, c='black', linewidth=2, label = 'K=9')\r\nplt.legend(handles=[f1, f2], prop=font1, loc='lower right')\r\n# plt.savefig(\"figs/rcn/K8vs9.png\", dpi=600, bbox_inches='tight')\r\n\r\n\r\n\r\n\r\n# mpacc = np.array(mpacc)\r\n# knnacc = np.array(knnacc)\r\n# mpmean = np.mean(mpacc, axis = 0)\r\n# knnmean = np.mean(knnacc, axis = 0)\r\n# mpmax = np.max(mpacc, axis = 0)\r\n# mpmin = np.min(mpacc, axis = 0)\r\n# knnmax = np.max(knnacc, axis = 0)\r\n# knnmin = np.min(knnacc, axis = 0)\r\n\r\n# print(knnmean)\r\n\r\n# x = np.linspace(1,mpacc.shape[1],mpacc.shape[1])\r\n# # x = x-1\r\n# plt.style.use('seaborn-darkgrid')\r\n# plt.figure(figsize=(7,4.5))\r\n# f1, = plt.plot(x, mpmean, c='red', label='Model predicted labels', linewidth=2)\r\n# f2, = plt.plot(x, knnmean, c='blue', label=r'Diluted labels', linewidth=2)\r\n# plt.fill_between(x, mpmin, mpmax, color='red', alpha=0.1)\r\n# plt.fill_between(x, knnmin, knnmax, color='blue', alpha=0.1)\r\n# plt.xlabel(r'Epoch', font2)\r\n# plt.ylabel('Accuracy (%)', font2)\r\n# plt.xlim(0, 100)\r\n# plt.ylim(25.5, 95)\r\n# legend = plt.legend(handles=[f1, f2], prop=font1, loc='lower right')#, loc='lower left'\r\n# plt.savefig(\"acc.png\", dpi=600, bbox_inches='tight')\r\n# print\r\n\r\n\r\n########################################################################\r\n\r\n# color = ['r', 'b', 'y', 'c', 'g', 'k']\r\n# for i in [4,5,6,7,8,9]:\r\n\r\n# lepath = 'Z:/lend/cifar/result_beta/LEND05_cifar10_ccn_0.4_K5_beta0.' 
+ str(i) + '_.txt'\r\n# lefile = open(lepath)\r\n# leacc = []\r\n# for line in lefile.readlines():\r\n# if 'valid' in line:\r\n# temp = line.split()\r\n# leacc.append(float(temp[1]))\r\n# if len(leacc)>199:\r\n# break\r\n# leacc = np.array(leacc)\r\n# x = np.linspace(1, leacc.shape[0], leacc.shape[0])\r\n# plt.plot(x, leacc, color=color[i-4])\r\n# plt.legend(['0.'+str(i)])\r\n\r\n\r\n\r\n\r\n# lepath = 'Z:/lend/cifar/result_beta/LEND05_cifar10_rcn_0.4_K5.txt'\r\n# lefile = open(lepath)\r\n# leacc1 = []\r\n# for line in lefile.readlines():\r\n# if 'valid' in line:\r\n# temp = line.split()\r\n# leacc1.append(float(temp[1]))\r\n# if len(leacc1)>199:\r\n# break\r\n# leacc1 = np.array(leacc1)\r\n# x = np.linspace(1, leacc1.shape[0], leacc1.shape[0])\r\n\r\n\r\n\r\n\r\n#########################################################################\r\n# copath = 'Z:\\\\lend\\\\Co-teaching-master\\\\results\\\\cifar100\\coteaching/coteachingcifar100_coteaching_pairflip_0.4.txt'\r\n# copath = 'Z:\\\\lend/JoCoR-master\\\\results\\\\jocor_cifar10_pairflip_0.2.txt'\r\n# cofile = open(copath)\r\n# coacc = []\r\n\r\n# for line in cofile.readlines():\r\n# if 'valid' in line:\r\n# temp = line.split()\r\n# coacc.append(float(temp[2]))\r\n# if len(coacc)>199:\r\n# break\r\n\r\n# coacc = np.array(coacc)\r\n# x = np.linspace(1, coacc.shape[0], coacc.shape[0])\r\n# print(max(coacc))\r\n\r\n\r\n#########################################################################\r\n# cepath = 'Z:\\\\lend\\\\Co-teaching-master\\\\results\\\\cifar10\\coteaching/coteachingcifar10_coteaching_pairflip_0.3.txt'\r\n# cefile = open(copath)\r\n# ceacc = []\r\n\r\n# for line in cefile.readlines():\r\n# if 'valid' in line:\r\n# temp = line.split()\r\n# ceacc.append(float(temp[1]))\r\n# if len(ceacc)>199:\r\n# break\r\n\r\n# ceacc = np.array(coacc)\r\n# x = np.linspace(1, ceacc.shape[0], ceacc.shape[0])\r\n# plt.style.use('seaborn-darkgrid')\r\n# plt.figure(figsize=(7,7))\r\n\r\n\r\n# leplot1 = plt.plot(x, leacc1, color='r')\r\n# leplot = plt.plot(x, leacc)\r\n# coplot = plt.plot(x, coacc)\r\n# leplot = plt.plot(x, ceacc)\r\n\r\n\r\n\r\n\r\n# plt.savefig(\"compare_cm.png\", dpi=600, bbox_inches='tight') # 保存图片\r\nplt.show() # 显示图片\r\n","repo_name":"zhangchuang96/lend","sub_path":"plot_acc.py","file_name":"plot_acc.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3800899552","text":"from utilities.inv import __inv__\n\nmyList = [\"abc\", \"bcd\", \"cde\", \"def\", \"efg\", \"fgh\", \"ghi\", \"hij\", \"ijk\", \"jkl\"]\nstr1 = \"ghi\"\ni = 0\nj = -1\nwhile i < len(myList):\n\t__inv__(myList=myList, str1=str1, i=i, j=j)\n\tif myList[i] == str1:\n\t\tj = i\n\t\ti = len(myList)\n\telse:\n\t\ti += 1\n","repo_name":"AbuJabal-Hussein/Loop-Invariant-Synthesizer","sub_path":"benchmarks/hybrid_benchmarks/test6/test6_hybrid.py","file_name":"test6_hybrid.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12953909780","text":"# readable\n# commenting your code\n# technical complexity - heavyload\n# recursive function -> avoid\nimport json\nimport requests\n\ninput_keyword = \"laugh\"\napi_url = \"https://api.giphy.com/v1/gifs/search\"\napi_key = \"hpvZycW22qCjn5cRM1xtWB8NKq4dQ2My\"\nparameters = {\"api_key\": \"hpvZycW22qCjn5cRM1xtWB8NKq4dQ2My\", \"q\": \"laugh\"}\nresponse = requests.get(api_url, params=parameters)\n\n# api_request_url = 
f\"{api_url}?&q={input_keyword}&api_key={api_key}&limit=5\"\n# print(api_request_url)\n# response = requests.get(api_request_url, params)\nif response.status_code == 200:\n response = response.json()\n # response = json.load(response)\n # print(response)\nprint(response['data'][0]['images']['original']['height'])\n\n# unable to fetch values from json response\n","repo_name":"malharlakdawala/DevelopersInstitute","sub_path":"Week5/Python/Day5/giphyapi.py","file_name":"giphyapi.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26456168320","text":"from django.contrib import admin\n\nfrom core.names import FIELDS\nfrom .models import Follow, User\n\n\n@admin.register(User)\nclass UserAdmin(admin.ModelAdmin):\n \"\"\"Источник конфигурации модели User, позволяет:\n - отображать в админке первичный ключ, email, имя пользователя,\n фамилию, имя;\n - редактировать все поля, кроме первичного ключа;\n - проводить поиск и фильтровать по имени пользователя,\n имени, и фамилии;\n - выводить \"-пусто-\" в полях со значением None.\"\"\"\n list_display = (\n 'pk',\n 'email',\n 'username',\n 'is_active',\n 'first_name',\n 'last_name',\n )\n list_editable = (\n 'email',\n 'username',\n 'is_active',\n 'first_name',\n 'last_name',\n )\n search_fields = (\n 'email',\n 'username',\n 'first_name',\n 'last_name',\n )\n list_filter = ('email', 'username',)\n empty_value_display = FIELDS['EMPTY']\n\n\n@admin.register(Follow)\nclass FollowAdmin(admin.ModelAdmin):\n \"\"\"Источник конфигурации модели Follow, позволяет:\n - отображать в админке первичный ключ, подписчика и\n автора, на которого происходит подписка;\n - редактировать все поля, кроме первичного ключа;\n - удалять подписку;\n - проводить поиск по авторам и подписчикам;\n - выводить \"-пусто-\" в полях со значением None.\"\"\"\n list_display = ('pk', 'author', 'user',)\n list_editable = ('author', 'user',)\n search_fields = ('author', 'user',)\n empty_value_display = FIELDS['EMPTY']\n","repo_name":"Galenfea/foodgram-project-react","sub_path":"backend/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32727397683","text":"from rest_framework import serializers\n\nfrom catalog.models import Director\nfrom catalog.models import Genre\nfrom catalog.models import Movie\n\n\nclass NewMovieSerializer(serializers.ModelSerializer):\n genre = serializers.CharField(max_length=240)\n director = serializers.CharField(max_length=340)\n\n class Meta:\n model = Movie\n fields = (\n 'id',\n 'name',\n 'duration',\n 'year',\n 'stars',\n 'genre',\n 'director',\n )\n\n def validate_director(self, value):\n director, created = Director.objects.get_or_create(\n name=value,\n defaults={\n 'created_by': self.context['request'].user,\n }\n )\n return director\n\n def validate_genre(self, value):\n genre, created = Genre.objects.get_or_create(\n name=value,\n defaults={\n 'created_by': self.context['request'].user,\n }\n )\n return genre\n\n def create(self, validated_data):\n return Movie.objects.create(\n created_by=self.context['request'].user,\n **validated_data,\n )\n","repo_name":"direyes71/movie","sub_path":"api/catalog/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"36410631598","text":"#BPSK 的SISO、MRC(2x1)、MRC(4x1) 錯誤率\n#(2x1)代表傳送端有兩根天線,接收端有一根天線\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nsnr_db = [0]*10\nsnr = [0]*10\nber = [0]*10\nN = 10000000 #執行N次來找錯誤率\nfor i in range(10):\n snr_db[i] = 2*i\n snr[i] = np.power(10,snr_db[i]/10)\n\nconstellation = [-1, 1]\n\nfor k in range(6):#總共有SISO、MRC(1x2)、MRC(1x4)、SISO(theory)、MRC(1x2) (theory)、MRC(2x1)\n for i in range(len(snr)):\n\n K = int(np.log2(len(constellation))) # 代表一個symbol含有K個bit\n # 接下來要算平均一個symbol有多少能量\n # 先將所有可能的星座點能量全部加起來\n energy = 0\n for m in range(len(constellation)):\n energy += abs(constellation[m]) ** 2\n Es = energy / len(constellation) # 平均一個symbol有Es的能量\n Eb = Es / K # 平均一個bit有Eb能量\n\n\n if k == 3: # SISO(theory)\n ber[i] = 1/2-1/2*np.power(1+1/snr[i],-1/2)\n elif k==4: # MRC(1x2) (theory)\n ber[i] = 1 / 2 - 1 / 2 * np.power(1 + 1 / snr[i], -1 / 2)\n ber[i] = ber[i]*ber[i]*(1+2*(1-ber[i]))\n elif k==5: # MRC(2x1) ---> 模擬結果同SISO\n error = 0\n for j in range(N):\n\n b = np.random.random() # 產生一個 (0,1) uniform 分布的隨機變數,來決定要送哪個symbol\n for m in range(len(constellation)):\n if b <= (m + 1) / len(constellation):\n symbol = constellation[m]\n break\n\n # 因為一次重複送兩個symbol所以平均一個bit的能量變兩倍 Eb_new = 2*Eb\n No = 2*Eb / snr[i]\n\n # 接下來symbol會通過Rayleigh channel\n h = [0] * 2\n receive = 0\n for l in range(2):\n h[l] = 1 / np.sqrt(2) * np.random.randn() + 1j / np.sqrt(2) * np.random.randn() # 產生 rayleigh 分布的通道模型\n receive += symbol * h[l]\n receive += np.sqrt(No / 2) * np.random.randn() + 1j * np.sqrt(No / 2) * np.random.randn()\n\n # 接下來使用match filter\n receive_symbol = receive / (h[0]+h[1])\n\n # receive_symbol就是接收端一根天線使用maximum ratio combining 後的結果 ----> 為一純量\n # 接收端利用Maximum Likelihood來detect symbol\n min_distance = 10 ** 9\n for n in range(len(constellation)):\n if abs(constellation[n] - receive_symbol) < min_distance:\n detection = constellation[n]\n min_distance = abs(constellation[n] - receive_symbol)\n # 我們會將傳送端送出的第m個symbol,detect出來,結果為detection\n\n if symbol != detection:\n error += 1\n ber[i] = error / (K*N)\n\n else: # 1x1 SISO通道、1x2 MISO通道、1x4 MISO通道\n error = 0\n for j in range(N):\n\n b = np.random.random() # 產生一個 (0,1) uniform 分布的隨機變數,來決定要送哪個symbol\n for m in range(len(constellation)):\n if b <= (m + 1) / len(constellation):\n symbol = constellation[m]\n break\n\n No = Eb / snr[i]\n\n h = [0]*(2**k)\n receive = [0]*(2**k)\n # 若k=0 代表 1x1 SISO通道,所以只要產生一個通道即可\n # 若k=1 代表 1x2 MISO通道,所以要產生兩個通道\n # 若k=2 代表 1x4 MISO通道,所以要產生四個通道\n for l in range(2**k):\n h[l] = 1/np.sqrt(2)*np.random.randn() + 1j/np.sqrt(2)*np.random.randn() #產生 rayleigh 分布的通道模型\n receive[l] = symbol*h[l] + np.sqrt(No/2)*np.random.randn() + 1j*np.sqrt(No/2)*np.random.randn()\n receive_symbol = 0\n\n #接下來使用match filter\n for l in range(2**k):\n receive_symbol += receive[l] * (h[l].conjugate())\n h_norm2 = 0\n for l in range(2**k):\n h_norm2 += abs(h[l])**2\n receive_symbol /= h_norm2\n\n # receive_symbol就是接收端一根天線使用maximum ratio combining 後的結果 ----> 為一純量\n # 接收端利用Maximum Likelihood來detect symbol\n min_distance = 10 ** 9\n for n in range(len(constellation)):\n if abs(constellation[n] - receive_symbol) < min_distance:\n detection = constellation[n]\n min_distance = abs(constellation[n] - receive_symbol)\n # 我們會將傳送端送出的第m個symbol,detect出來,結果為detection\n\n if symbol != detection:\n error += 1\n\n ber[i] = error/(K*N)\n if k==0:\n plt.semilogy(snr_db, ber,marker='o', linestyle='-', label=\"SISO for BPSK\")\n elif k>=1 and k<=2:\n plt.semilogy(snr_db, ber, marker='o', linestyle='-', label=\"MRC(1x{0}) for 
BPSK\".format(2**k))\n elif k == 3:\n plt.semilogy(snr_db, ber, marker='o', linestyle='-', label=\"SISO(theory) for BPSK\")\n elif k == 4:\n plt.semilogy(snr_db, ber, marker='o', linestyle='-', label=\"MRC(1x2) (theory) for BPSK\")\n elif k == 5:\n plt.semilogy(snr_db, ber, marker='o', linestyle='-', label=\"MRC(2x1) for BPSK\")\n\nplt.title('Maximal Ratio Combining(MBC)')\nplt.ylabel(\"BER\")\nplt.xlabel(\"Eb/No , dB\")\nplt.grid(True,which='both')\nplt.legend()\nplt.show()","repo_name":"XassassinXsaberX/communication-simulation","sub_path":"MIMO/Maximal Ratio Combining(MBC).py","file_name":"Maximal Ratio Combining(MBC).py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"40023360950","text":"\r\nimport ijson\r\nimport numpy\r\nimport numpy.matlib\r\nfrom scipy.sparse import csc_matrix,lil_matrix\r\nfrom scipy import sparse\r\nfrom operator import itemgetter\r\nimport pandas as pd\r\n\r\n\r\ndef sp_calc_error(temp, height,width,k):\r\n tot_err=0.0\r\n global main_mat\r\n tot=0\r\n l = len(main_mat.nonzero()[0])\r\n tm1 = main_mat.nonzero()[0].copy()\r\n tm2 = main_mat.nonzero()[1].copy()\r\n for p in range(0,l):\r\n #print(p,l)\r\n #i,j = main_mat.nonzero()[0][p], main_mat.nonzero()[1][p]\r\n i, j = tm1[p], tm2[p]\r\n #print(main_mat[i,j]-temp[i,j])\r\n tot = tot + 1\r\n tot_err = tot_err + (main_mat[i,j]-temp[i,j]) * (main_mat[i,j]-temp[i,j])\r\n\r\n return (tot_err/tot) ** (.5)\r\n\r\n\r\ndef calc_error(temp, height,width,k):\r\n global U\r\n global V\r\n tot_err=0\r\n global main_mat\r\n l = len(main_mat.nonzero()[0])\r\n tm1 = main_mat.nonzero()[0].copy()\r\n tm2 = main_mat.nonzero()[1].copy()\r\n\r\n for p in range(0,l):\r\n #print(p,l)\r\n #i,j = main_mat.nonzero()[0][p], main_mat.nonzero()[1][p]\r\n i, j = tm1[p], tm2[p]\r\n #print(main_mat[i,j]-temp[i,j])\r\n tot_err = tot_err + (main_mat[i,j]-temp[i,j]) * (main_mat[i,j]-temp[i,j])\r\n\r\n #return tot_err\r\n sum1 = 0\r\n sum2 = 0\r\n\r\n\r\n for row in range(0,height):\r\n temp_row=0\r\n for col in range(0,k):\r\n temp_row = temp_row + U[row, col]*U[row, col]\r\n\r\n sum1 = sum1 + lu * temp_row\r\n\r\n \r\n for col in range(0, width):\r\n temp_row = 0\r\n for row in range(0, k):\r\n temp_row = temp_row + V[row,col]*V[row,col]\r\n\r\n sum2 = sum2 + lv * temp_row\r\n\r\n\r\n return sum1 + sum2 + tot_err\r\n\r\n\r\n\r\n\r\ndef update_U(height, width, k, lu, lv):\r\n global U\r\n global V\r\n global main_mat\r\n\r\n for row in range (0,height):\r\n #print(\"column\", row)\r\n fill_col=[]\r\n\r\n tr = main_mat[row, :]\r\n for pp in tr.nonzero()[1]:\r\n fill_col.append(pp)\r\n #print(\"fill\",len(fill_col))\r\n\r\n store = numpy.matlib.zeros((k, k))\r\n for col in fill_col:\r\n store = V[:,col] * V[:,col].transpose() + store\r\n\r\n id = numpy.identity(k);\r\n store = store + lu * id\r\n store = store.I\r\n\r\n sec_store=numpy.matlib.zeros((k, 1))\r\n\r\n for col in fill_col:\r\n #print(\"seccolumn\", col)\r\n sec_store = sec_store + main_mat[row, col] * V[:, col]\r\n\r\n fin = store * sec_store\r\n fin = fin.transpose()\r\n U[row, :] = fin\r\n\r\n\r\ndef update_V(height, width, k, lu, lv):\r\n global U\r\n global V\r\n global main_mat\r\n for col in range (0,width):\r\n\r\n fill_row=[]\r\n tmp=main_mat[:,col]\r\n for t in tmp.nonzero()[0]:\r\n fill_row.append(t)\r\n #print(\"fill col\", len(fill_row))\r\n\r\n store = numpy.matlib.zeros((k, k))\r\n for row in fill_row:\r\n store = U[row, :].transpose() * U[row, :] + store\r\n\r\n id 
= numpy.identity(k)\r\n store = store + lv * id\r\n store = store.I\r\n\r\n sec_store=numpy.matlib.zeros((k, 1))\r\n for p in fill_row:\r\n #print(\"p\", p)\r\n j=U[p, :]\r\n j=j.transpose()\r\n sec_store = sec_store + main_mat[p, col] * j\r\n\r\n fin = store * sec_store\r\n\r\n V[:, col] = fin\r\n\r\n\r\n\r\n\r\ndf = pd.read_excel(\"ratings_train.xlsx\", header=None)\r\ntrain_all = df.to_numpy()\r\n\r\n\r\n\r\ndf = pd.read_excel(\"ratings_validate.xlsx\", header=None)\r\nval_all = df.to_numpy()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncons=[.01,.1,1,10]\r\nlat=[10,20,40]\r\n\r\nac_parameter=(0,0,0)\r\nac_ref=-1\r\n\r\nbest_V = 0\r\n\r\nfor k in lat:\r\n for lu in cons:\r\n lv = lu\r\n #print(\"one it\")\r\n height = len(train_all)\r\n width = len(train_all[0])\r\n height = 1000\r\n width = 100\r\n print(\"K \", k)\r\n print(\"LU\", lu)\r\n\r\n main_mat = lil_matrix((height,width), dtype=float)\r\n\r\n V = numpy.matlib.rand((k, width))\r\n U = numpy.matlib.zeros((height, k))\r\n for i in range(0,height):\r\n for j in range (0,width):\r\n if train_all[i][j] == -1:\r\n #print(\"yo\")\r\n continue\r\n main_mat[i, j] = float(train_all[i][j])\r\n #print(float(train_all[i][j]))\r\n\r\n pre=-1\r\n for it in range(1,30):\r\n\r\n uu = lil_matrix(U)\r\n vv = lil_matrix(V)\r\n temp = uu * vv\r\n\r\n error = calc_error(temp, height, width, k)\r\n print(k,lu,lv)\r\n #print(\"local error \",error)\r\n if pre != -1:\r\n diff=abs(error-pre)\r\n #print(\"diff \",diff)\r\n if diff <= 1:\r\n break\r\n pre = error\r\n\r\n update_U(height, width, k, lu, lv)\r\n update_V(height, width, k, lu, lv)\r\n\r\n uu = sparse.lil_matrix(U)\r\n vv = sparse.lil_matrix(V)\r\n temp = uu * vv\r\n\r\n err = sp_calc_error(temp, height, width, k)\r\n print(\"training error\", err)\r\n\r\n height = len(val_all)\r\n width = len(val_all[0])\r\n height = 500\r\n width = 100\r\n\r\n main_mat = lil_matrix((height, width), dtype=float)\r\n\r\n U = numpy.matlib.zeros((height, k))\r\n\r\n for i in range(0,height):\r\n for j in range (0,width):\r\n if val_all[i][j] == -1:\r\n continue\r\n main_mat[i, j] = val_all[i][j]\r\n\r\n update_U(height, width, k, lu, lv)\r\n\r\n uu = sparse.lil_matrix(U)\r\n vv = sparse.lil_matrix(V)\r\n temp = uu * vv\r\n\r\n\r\n err = sp_calc_error(temp, height, width, k)\r\n print(\"validation error\",err)\r\n\r\n if ac_ref == -1:\r\n ac_ref = err\r\n ac_parameter = (lu, lv, k)\r\n best_V = vv.copy()\r\n elif ac_ref > err:\r\n ac_ref = err\r\n ac_parameter = (lu, lv, k)\r\n best_V = vv.copy()\r\n\r\n\r\n\r\nprint(ac_parameter)\r\n","repo_name":"Protick666/ALS-project","sub_path":"ALS/final_ALS.py","file_name":"final_ALS.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"40197952535","text":"\"\"\" import sys\nimport re\n# 55-50+40-00009-00009\n# n = sys.stdin.readline().rstrip().split('-')\n\n\n\n\n# result = 0\nfor i in n:\n if i == n[0]:\n result = eval(i)\n else:\n result -= eval(i)\nprint(result) \"\"\"\na = input().split('-')\nnum = []\nfor i in a:\n cnt = 0\n s = i.split('+')\n for j in s:\n cnt += int(j)\n num.append(cnt)\nn = num[0]\nfor i in range(1, len(num)):\n n -= num[i]\nprint(n)","repo_name":"hyeonDD/coding-test","sub_path":"baekjoon/python/greedy/1541/lost_parenthesis.py","file_name":"lost_parenthesis.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"371023031","text":"import 
os\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import metrics as mt\n\n\ndef getResultsBinary(labels, predictions, local_results_model):\n # get ROC curve vectors\n fpr, tpr, thresholds = mt.roc_curve(labels, predictions, pos_label=1)\n\n # get AUC of the curve\n roc_auc = mt.auc(fpr, tpr)\n\n # find the best threshold\n A = tpr - fpr\n ind = A.argmax(axis=0)\n threshold = thresholds[ind]\n\n # get PR curve vectors\n precision, recall, _ = mt.precision_recall_curve(labels, predictions)\n\n # get AUC of the curve\n prc_auc = mt.auc(recall, precision)\n\n # get quality indicators\n print('TESTING:', type(labels), type(predictions > threshold))\n print(labels[0:10], predictions[0:10])\n prec, reca, f1sc, supp = mt.precision_recall_fscore_support(labels, predictions > threshold, average='binary')\n acc = mt.accuracy_score(labels, predictions > threshold)\n\n # print into file\n f1 = open(local_results_model+'/stats.txt','a+')\n\n # print quality indicatorss\n f1.write('Stats :\\n')\n f1.write('ROC AUC: {0} \\nPRC AUC: {1}\\n'.format(roc_auc, prc_auc))\n f1.write('Based on threshold: {0} the following performance metrics are:\\n'.format(threshold))\n f1.write('Accuracy: {0} \\nPrecision: {1} \\nRecall: {2} \\nF1-score: {3}\\n\\n'.format(acc, prec, reca, f1sc))\n f1.close()\n # plot ROC\n # plt.figure()\n # plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % (roc_auc))\n # plt.plot([0, 1], [0, 1], 'k--')\n # plt.xlim([0.0, 1.0])\n # plt.ylim([0.0, 1.05])\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('Receiver operating characteristic '+key)\n # plt.legend(loc=\"lower right\")\n # plt.savefig(local_results_model+'ROC_MODEL_'+key+'.pdf', bbox_inches='tight')\n #\n # plot PRC\n # plt.figure()\n # plt.plot(recall, precision, label='PR curve (area = %0.2f)' % (prc_auc))\n # plt.plot([0, 1], [0, 1], 'k--')\n # plt.xlim([0.0, 1.0])\n # plt.ylim([0.0, 1.05])\n # plt.xlabel('Recall')\n # plt.ylabel('Precision')\n # plt.title('Precision-Recall curve '+key)\n # plt.legend(loc=\"lower left\")\n # plt.savefig(local_results_model+'PRC_MODEL_'+key+'.pdf', bbox_inches='tight')\n\n\ndef getResultsMulti(labels, predictions, local_results_model):\n labels_multi = np.asarray(labels)\n predictions_multi = np.zeros([len(labels), 6])\n print(type(predictions), predictions.shape)\n\n maxis_label = labels_multi.argmax(axis=1).tolist()\n\n maxis = predictions.argmax(axis=1).tolist()\n for i,m in enumerate(maxis):\n predictions_multi[i,m] = predictions[i,m] #1\n\n numerator = 0\n denominator = len(maxis_label)\n for i in range(len(maxis_label)):\n if maxis_label[i] == maxis[i]:\n numerator += 1\n\n confusion_matrix = mt.confusion_matrix(maxis_label, maxis)\n # print into file\n f1 = open(local_results_model + '/stats.txt', 'a+')\n f1.write('Overall Accuracy of the model is ' + str(float(numerator)/float(denominator)) + ' :\\n')\n for i in range(6):\n print('Getting results for class:', i)\n\n labels_class = labels_multi[:,i]\n predictions_class = predictions_multi[:,i]\n print('Sum labels:', np.sum(labels_class))\n print('Sum preds:', np.sum(predictions_class))\n\n # get ROC curve vectors\n fpr, tpr, thresholds = mt.roc_curve(labels_class, predictions_class, pos_label=1)\n\n # get AUC of the curve\n roc_auc = mt.auc(fpr, tpr)\n\n # find the best threshold\n A = tpr - fpr\n ind = A.argmax(axis=0)\n threshold = thresholds[ind]\n\n # get PR curve vectors\n precision, recall, _ = 
mt.precision_recall_curve(labels_class, predictions_class)\n\n # get AUC of the curve\n prc_auc = mt.auc(recall, precision)\n\n # get quality indicators\n print('TESTING:', type(labels_class), type(predictions_class > threshold))\n print(labels_class[0:10], predictions_class[0:10])\n prec, reca, f1sc, supp = mt.precision_recall_fscore_support(labels_class.tolist(), predictions_class > threshold, average='binary')\n acc = mt.accuracy_score(labels_class, predictions_class > threshold)\n\n\n\n # print quality indicatorss\n f1.write('Stats for class '+ str(i) +' :\\n')\n f1.write('ROC AUC: {0} \\nPRC AUC: {1}\\n'.format(roc_auc, prc_auc))\n f1.write('Based on threshold: {0} the following performance metrics are:\\n'.format(threshold))\n f1.write('Accuracy: {0} \\nPrecision: {1} \\nRecall: {2} \\nF1-score: {3}\\n\\n'.format(acc, prec, reca, f1sc))\n\n f1.write('\\n')\n # f1.write(os.linesep.join(map(''.join, confusion_matrix)))\n f1.writelines('\\t'.join(str(j) for j in i) + '\\n' for i in confusion_matrix.tolist())\n f1.close()\n\n\n","repo_name":"gligorijevic/DeepAttentionModel","sub_path":"resultsAnalysis.py","file_name":"resultsAnalysis.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"37329809973","text":"from tkinter import *\n\nroot = Tk()\nroot.title('Calculator')\n\n# hello\n\nequation = Entry(root, width=50, borderwidth=5)\nequation.grid(row=0, column=0, columnspan=3, padx=10, pady=10)\n\n\ndef click(number):\n current = equation.get()\n equation.delete(0, END)\n equation.insert(0, str(current) + str(number))\n\n\ndef clear():\n equation.delete(0, END)\n\n\ndef add():\n first = equation.get()\n global f_num\n global math\n math = 'addition'\n f_num = int(float(first))\n equation.delete(0, END)\n\n\ndef sub():\n first = equation.get()\n global f_num\n global math\n math = 'subtract'\n f_num = int(float(first))\n equation.delete(0, END)\n\n\ndef mul():\n first = equation.get()\n global f_num\n global math\n math = 'multiply'\n f_num = int(float(first))\n equation.delete(0, END)\n\n\ndef div():\n first = equation.get()\n global f_num\n global math\n math = 'divide'\n f_num = int(float(first))\n equation.delete(0, END)\n\n\ndef equal():\n second = equation.get()\n equation.delete(0, END)\n if math == 'addition':\n equation.insert(0, f_num + int(second))\n elif math == 'subtract':\n equation.insert(0, f_num - int(second))\n elif math == 'multiply':\n equation.insert(0, f_num * int(second))\n elif math == 'divide':\n if int(second) == 0:\n return equation.insert(0, \"Divide by 0 Error\")\n equation.insert(0, f_num / int(second))\n else:\n equation.delete(0, END)\n\n# Define buttons\n\n\nbutt0 = Button(root, text='0', padx=40, pady=20, command=lambda: click(0))\nbutt1 = Button(root, text='1', padx=40, pady=20, command=lambda: click(1))\nbutt2 = Button(root, text='2', padx=40, pady=20, command=lambda: click(2))\nbutt3 = Button(root, text='3', padx=41, pady=20, command=lambda: click(3))\nbutt4 = Button(root, text='4', padx=40, pady=20, command=lambda: click(4))\nbutt5 = Button(root, text='5', padx=40, pady=20, command=lambda: click(5))\nbutt6 = Button(root, text='6', padx=41, pady=20, command=lambda: click(6))\nbutt7 = Button(root, text='7', padx=40, pady=20, command=lambda: click(7))\nbutt8 = Button(root, text='8', padx=40, pady=20, command=lambda: click(8))\nbutt9 = Button(root, text='9', padx=41, pady=20, command=lambda: click(9))\nbutt_clear = Button(root, text='C', padx=95, pady=20, 
command=clear)\nbutt_eq = Button(root, text='=', padx=95, pady=20, command=equal)\nbutt_add = Button(root, text='+', padx=39, pady=20, command=add)\nbutt_sub = Button(root, text='-', padx=41, pady=20, command=sub)\nbutt_mul = Button(root, text='*', padx=40, pady=20, command=mul)\nbutt_div = Button(root, text='/', padx=40, pady=20, command=div)\n\n# buttons on screen\n\nbutt7.grid(row=1, column=0)\nbutt8.grid(row=1, column=1)\nbutt9.grid(row=1, column=2)\n\nbutt4.grid(row=2, column=0)\nbutt5.grid(row=2, column=1)\nbutt6.grid(row=2, column=2)\n\nbutt1.grid(row=3, column=0)\nbutt2.grid(row=3, column=1)\nbutt3.grid(row=3, column=2)\n\nbutt0.grid(row=4, column=1)\nbutt_add.grid(row=4, column=0)\nbutt_sub.grid(row=4, column=2)\n\nbutt_clear.grid(row=5, column=1, columnspan=2)\nbutt_mul.grid(row=5, column=0)\n\nbutt_div.grid(row=6, column=0)\nbutt_eq.grid(row=6, column=1, columnspan=2)\n\nroot.mainloop()","repo_name":"thelonesaiyan/Tkinter","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10484072656","text":"\"\"\"\nModels for \"webcast attendees\" package.\n\"\"\"\n\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy.dialects.postgresql import JSONB\n\nfrom app import db\nfrom app.base.models import BaseModel\nfrom app.base.model_fields import CastingArray\n# related model imports done in webcasts/__init__\n\n\nclass WebcastAttendee(BaseModel):\n\n __tablename__ = 'webcast_attendee'\n\n created_by = db.Column(db.BigInteger, db.ForeignKey(\n 'user.id', name='webcast_attendee_created_by_fkey',\n ondelete='CASCADE'), nullable=False)\n updated_by = db.Column(db.BigInteger, db.ForeignKey(\n 'user.id', name='webcast_attendee_updated_by_fkey',\n ondelete='CASCADE'), nullable=False)\n webcast_id = db.Column(db.BigInteger, db.ForeignKey(\n 'webcast.id', name='webcast_attendee_webcast_id_fkey',\n ondelete='CASCADE'), nullable=False)\n attendee_id = db.Column(db.BigInteger, db.ForeignKey(\n 'user.id', name='webcast_attendee_attendee_id_fkey',\n ondelete='CASCADE'), nullable=False)\n\n rating = db.Column(db.Integer)\n comment = db.Column(db.String(256))\n\n # fields for big marker api\n entered_at = db.Column(db.DateTime)\n leaved_at = db.Column(db.DateTime)\n total_duration = db.Column(db.String(16))\n engaged_duration = db.Column(db.String(16))\n chats_count = db.Column(db.BigInteger())\n qas_count = db.Column(db.BigInteger())\n polls_count = db.Column(db.BigInteger())\n polls = db.Column(CastingArray(JSONB))\n questions = db.Column(db.ARRAY(db.String()))\n handouts = db.Column(db.ARRAY(db.String()))\n browser_name = db.Column(db.String(128))\n browser_version = db.Column(db.String(128))\n device_name = db.Column(db.String(128))\n\n # multi column\n __table_args__ = (\n UniqueConstraint('webcast_id', 'attendee_id',\n name='c_webcast_id_attendee_id_key'),\n )\n\n # relationships\n webcast = db.relationship('Webcast', backref=db.backref(\n 'webcast_attendee', lazy='dynamic', passive_deletes=True))\n attendee = db.relationship('User', backref=db.backref(\n 'webcasts_attended', lazy='dynamic'),\n foreign_keys='WebcastAttendee.attendee_id')\n\n def __init__(self, created_by=None, webcast_id=None, updated_by=None,\n attendee_id=None, *args, **kwargs):\n self.created_by = created_by\n self.updated_by = updated_by\n self.webcast_id = webcast_id\n self.attendee_id = attendee_id\n super(WebcastAttendee, self).__init__(*args, **kwargs)\n\n def 
__repr__(self):\n return '<WebcastAttendee %r>' % self.row_id\n","repo_name":"Witzcode0/Exchange-connect","sub_path":"app/webcast_resources/webcast_attendees/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"645419256","text":"import typer\nimport asyncio\nimport subprocess\nfrom main.main import create_dev_app\nfrom useradmin.models import async_main as userData,droptables as userDrop\nfrom simplecms.models.dbconnect import async_main as cmsData,droptables as cmsDrop\n \n \ncapp = typer.Typer()\napp=create_dev_app()\n\n@capp.command()\ndef rung():\n \"\"\"Starts a gunicorn server for the app with uvicorn workers, bound to 0.0.0.0:9000 with one worker\n \"\"\"\n subprocess.run([\"gunicorn\", \"manage:app\", \"-k\" ,\"uvicorn.workers.UvicornWorker\",\"-b\" ,\"0.0.0.0:9000\",\"--reload\",\"-w\",\"1\"]) \n\n@capp.command()\ndef upgrade():\n \"\"\"creates base models based on their metadata\"\"\"\n asyncio.run(userData())\n asyncio.run(cmsData())\n\n@capp.command()\ndef drop():\n \"\"\"drops all tables created from the provided database\"\"\"\n asyncio.run(userDrop())\n asyncio.run(cmsDrop())\n\n@capp.command()\ndef test(location):\n \"\"\"\n Takes the location of the tests as an argument; this argument is required \n \"\"\"\n subprocess.run([\"pytest\", location,\"--asyncio-mode=strict\"])\n \n\nif __name__ == \"__main__\":\n capp() ","repo_name":"bushubeke/fast-scaleable-struct","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12364267157","text":"import argparse\nimport json\nimport pandas as pd\n\n\ndef main(flags):\n \"\"\"Pretty print rankings.\n\n Args:\n flags : run flags\n \"\"\"\n\n rankings = json.load(open(flags.rankings_file, 'r'))\n queries = pd.read_csv(flags.queries)\n corpus = pd.read_csv(flags.corpus)\n\n output = []\n\n for idx, query_ranking in enumerate(rankings):\n new_query = {}\n new_query['query'] = queries.iloc[idx]['query']\n matches = []\n for ranking in query_ranking:\n matches.append(\n corpus[corpus['_id'] == ranking[\"corpus_id\"]]['corpus'].values[0]\n )\n new_query[\"results\"] = matches\n output.append(new_query)\n \n print(json.dumps(output, indent=2))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--rankings_file',\n type=str,\n required=True,\n help=\"rankings from query search\"\n )\n\n parser.add_argument('--queries',\n required=True,\n help=\"raw queries file\",\n type=str\n )\n\n parser.add_argument('--corpus',\n required=True,\n help=\"raw corpus file\",\n type=str\n )\n\n FLAGS = parser.parse_args()\n\n main(FLAGS)\n","repo_name":"oneapi-src/vertical-search-engine","sub_path":"src/display_rankings.py","file_name":"display_rankings.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"21126847839","text":"import argparse\nimport subprocess\n\n\nZIP_URL = ('https://github.com/google/protobuf/releases/download/v3.3.0/'\n 'protoc-3.3.0-linux-x86_64.zip')\n\n\ndef create_asset(target_dir):\n \"\"\"Create the asset.\"\"\"\n local_zip = '/tmp/protoc.zip'\n subprocess.check_call(['curl', '-L', ZIP_URL, '-o', local_zip])\n subprocess.check_call(['unzip', local_zip, '-d', target_dir])\n\n\ndef main():\n parser = argparse.ArgumentParser()\n 
parser.add_argument('--target_dir', '-t', required=True)\n args = parser.parse_args()\n create_asset(args.target_dir)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"google/skia","sub_path":"infra/bots/assets/protoc/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":8112,"dataset":"github-code","pt":"81"} +{"seq_id":"41689417656","text":"#!/usr/bin/env python\n\nimport pytest\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_almost_equal\nfrom ortools.graph.pywrapgraph import SimpleMinCostFlow\nfrom sklearn.metrics import euclidean_distances\n\nfrom k_means_constrained.k_means_constrained_ import minimum_cost_flow_problem_graph\nfrom k_means_constrained.mincostflow_vectorized import SimpleMinCostFlowVectorized\n\ndef test_SimpleMinCostFlowVectorized_equivalence():\n # Setup graph\n X = np.array([\n [0, 0],\n [1, 2],\n [1, 4],\n [1, 0],\n [4, 2],\n [4, 4],\n [4, 0],\n [4, 4]\n ])\n C = np.array([\n [0, 0],\n [4, 4]\n ])\n size_min, size_max = 3, 10\n D = euclidean_distances(X, C, squared=True)\n\n edges, costs, capacities, supplies, n_C, n_X = minimum_cost_flow_problem_graph(X, C, D, size_min, size_max)\n\n ## Original version\n min_cost_flow = SimpleMinCostFlow()\n N_edges = edges.shape[0]\n N_nodes = len(supplies)\n\n for i in range(0, N_edges):\n min_cost_flow.AddArcWithCapacityAndUnitCost(int(edges[i, 0]), int(edges[i, 1]),\n int(capacities[i]), int(costs[i]))\n\n for i in range(0, N_nodes):\n min_cost_flow.SetNodeSupply(i, int(supplies[i]))\n\n if min_cost_flow.Solve() != min_cost_flow.OPTIMAL:\n raise Exception('There was an issue with the min cost flow input.')\n\n labels_M = np.array([min_cost_flow.Flow(i) for i in range(n_X*n_C)]).reshape(n_X, n_C)\n\n\n ## Vectorised version\n min_cost_flow_vec = SimpleMinCostFlowVectorized()\n\n min_cost_flow_vec.AddArcWithCapacityAndUnitCostVectorized(edges[:,0], edges[:,1], capacities, costs)\n min_cost_flow_vec.SetNodeSupplyVectorized(np.arange(N_nodes, dtype='int32'), supplies)\n\n if min_cost_flow_vec.Solve() != min_cost_flow_vec.OPTIMAL:\n raise Exception('There was an issue with the min cost flow input.')\n\n labels_M_vec = min_cost_flow_vec.FlowVectorized(np.arange(n_X * n_C, dtype='int32')).reshape(n_X, n_C)\n\n ## Should be equivalence\n assert_array_equal(labels_M, labels_M_vec)\n","repo_name":"Raihan-Seraj/Clustering-Analysis","sub_path":"classifiers/k-mean-constrained/tests/test_mincostflow_vectorized.py","file_name":"test_mincostflow_vectorized.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1243056556","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 24 18:16:17 2014\n\n@author: md238665\n\nTest that we correctly treat the case where no resampling is given.\nCopied from test_mapreduce.\n\nThe resample function is not needed. 
The mapper function uses the same dataset\nfor train and test.\n\n\"\"\"\n\nimport os\nimport json\nimport numpy as np\nimport tempfile\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.metrics import r2_score\nimport pandas as pd\n\nfrom collections import OrderedDict\n\n\ndef load_globals(config):\n import mapreduce as GLOBAL # access to global variables\n GLOBAL.DATA = GLOBAL.load_data(config[\"data\"])\n\n\ndef mapper(key, output_collector):\n import mapreduce as GLOBAL # access to global variables\n X_train = GLOBAL.DATA[\"X\"]\n X_test = GLOBAL.DATA[\"X\"]\n y_train = GLOBAL.DATA[\"y\"].ravel()\n y_test = GLOBAL.DATA[\"y\"].ravel()\n mod = ElasticNet(alpha=key[0], l1_ratio=key[1])\n y_pred = mod.fit(X_train, y_train).predict(X_test)\n output_collector.collect(key, dict(y_pred=y_pred, y_true=y_test))\n\n\ndef reducer(key, values):\n # values are OutputCollectors containing a path to the results.\n # load return dict corresponding to mapper ouput. they need to be loaded.\n values = [item.load() for item in values.values()]\n y_true = np.concatenate([item[\"y_true\"].ravel() for item in values])\n y_pred = np.concatenate([item[\"y_pred\"].ravel() for item in values])\n d = OrderedDict()\n d['r2'] = r2_score(y_true, y_pred)\n return d\n\n\nif __name__ == \"__main__\":\n WD = tempfile.mkdtemp()\n\n ###########################################################################\n ## Create dataset\n np.random.seed(13031981)\n n, p = 50, 100\n X = np.random.rand(n, p)\n beta = np.random.rand(p, 1)\n y = np.dot(X, beta)\n np.save(os.path.join(WD, 'X.npy'), X)\n np.save(os.path.join(WD, 'y.npy'), y)\n\n ###########################################################################\n ## Create config file without resampling\n params = [[alpha, l1_ratio]\n for alpha in [0.1, 1] for l1_ratio in [.1, .5, 1.]]\n user_func_filename = os.path.abspath(__file__)\n\n # mapreduce will set its WD to the directory that contains the config file\n # use relative path\n config = dict(data=dict(X=\"X.npy\",\n y=\"y.npy\"),\n params=params,\n map_output=\"results\",\n user_func=user_func_filename,\n reduce_output=\"results.csv\")\n json.dump(config, open(os.path.join(WD, \"config.json\"), \"w\"))\n exec_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n \"..\", \"mapreduce.py\"))\n ###########################################################################\n ## Apply map\n map_cmd = \"%s -v --map %s/config.json\" % (exec_path, WD)\n reduce_cmd = \"%s -v --reduce %s/config.json\" % (exec_path, WD)\n os.system(map_cmd)\n os.system(reduce_cmd)\n\n ###########################################################################\n ## Do it without mapreduce\n res = list()\n for key in params:\n # key = params[0]\n y_true = list()\n y_pred = list()\n X_train = X\n X_test = X\n y_train = y.ravel()\n y_test = y.ravel()\n mod = ElasticNet(alpha=key[0], l1_ratio=key[1])\n y_pred.append(mod.fit(X_train, y_train).predict(X_test))\n y_true.append(y_test)\n y_true = np.hstack(y_true)\n y_pred = np.hstack(y_pred)\n # As we reload mapreduce results, the params will be interpreted as\n # strings representation of tuples.\n # Here we apply the same representation\n res.append([str(tuple(key)), r2_score(y_true, y_pred)])\n true = pd.DataFrame(res, columns=[\"params\", \"r2\"])\n mr = pd.read_csv(os.path.join(WD, 'results.csv'))\n # Check same keys\n assert np.all(np.sort(true.params) == np.sort(mr.params))\n m = pd.merge(true, mr, on=\"params\", suffixes=[\"_true\", \"_mr\"])\n # Check same scores\n assert 
np.allclose(m.r2_true, m.r2_mr)\n","repo_name":"neurospin/scripts","sub_path":"mapreduce/tests/test_mapreduce_noresample.py","file_name":"test_mapreduce_noresample.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40253646463","text":"import streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom itertools import combinations,permutations\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom pickle import dump\nimport os\nimport numpy as np\n\nle = LabelEncoder()\n\n@st.cache(allow_output_mutation=True,persist = True)\ndef load_data(file):\n \"\"\"\n Load data from file\n \"\"\"\n try:\n df = pd.read_csv(file) if file.name.endswith('csv') else pd.read_excel(file)\n return df\n except Exception as e:\n st.error(f\"Error: {e}\")\n return None\n\ndef display_data(df):\n \"\"\"\n Display data in dataframe format\n \"\"\"\n if df is not None:\n df_style = df.style.apply(lambda x: [\"background: yellow\" if pd.isnull(v) else \"\" for v in x], axis = 1)\n st.write(df_style)\n\n\ndef check_duplicate_cols(df):\n \"\"\"\n Check for duplicate columns based on matching column values\n \"\"\"\n duplicate_cols = []\n for i, col1 in enumerate(df.columns):\n for col2 in df.columns[i+1:]:\n if (df[col1] == df[col2]).all():\n duplicate_cols.append(col2)\n df.drop(col2, axis=1, inplace=True)\n if len(duplicate_cols) > 0:\n st.warning(f\"Duplicate columns found with matching values: {', '.join(duplicate_cols)}. Removed duplicate column(s).\")\n else:\n st.success(\"No duplicate columns found with matching values.\")\n\t\n\ndef check_single_value_cols(df):\n \"\"\"\n Check for columns with a single value\n \"\"\"\n single_value_cols = []\n for col in df.columns:\n if df[col].nunique() == 1:\n single_value_cols.append(col)\n df.drop(col, axis=1, inplace=True)\n if len(single_value_cols) > 0:\n st.warning(f\"Columns with single value found: {', '.join(single_value_cols)}. Removed single value column(s).\")\n else:\n st.success(\"No columns with single value found.\")\n\ndef check_duplicate_rows(df):\n \"\"\"\n Check for duplicate rows based on matching row values\n \"\"\"\n duplicate_rows = df[df.duplicated()]\n if len(duplicate_rows) > 0:\n df.drop_duplicates(inplace=True)\n st.warning(f\"Duplicate rows found. 
Removed {len(duplicate_rows)} duplicate row(s).\")\n else:\n st.success(\"No duplicate rows found.\")\n\n \ndef display_missing_values(df):\n \"\"\"\n Display rows with missing values\n \"\"\"\n missing_rows = df[df.isnull().any(axis=1)]\n if len(missing_rows) > 0:\n st.warning(f\"Missing values found in {len(missing_rows)} row(s).\")\n display_data(missing_rows)\n \n else:\n st.success(\"No missing values found.\")\n return missing_rows\n \n \ndef make_bar_chart(column, title, ylabel, xlabel, y_offset=0.12, x_offset=700):\n ax = df.groupby(column).median()[['charges']].plot(\n kind='bar', figsize=(10, 6), fontsize=13, color='#4f4f4f'\n )\n ax.set_title(title, size=20, pad=30)\n ax.set_ylabel(ylabel, fontsize=14)\n ax.set_xlabel(xlabel, fontsize=14)\n ax.get_legend().remove()\n \n for i in ax.patches:\n ax.text(i.get_x() + x_offset, i.get_height() + y_offset, f'${str(round(i.get_height(), 2))}', fontsize=15)\n return ax\n \ndef show_summary_statistics(df):\n st.write(\"Summary Statistics:\")\n st.write(df.describe())\n \ndef encode_binary_column(df):\n for feature in df.columns:\n if df[feature].nunique() == 2:\n unique_values = list(df[feature].unique())\n df[feature] = df[feature].apply(lambda x: 0 if x == unique_values[0] else 1)\n st.success(f\"{feature} has been encoded as binary 0 and 1.\")\n else:\n st.success(\"No column found with 2 unique values.\")\n\n \ndef plot_distribution(df, column):\n plt.figure(figsize=(8, 6))\n sns.histplot(df[column], kde=False)\n plt.xlabel(column)\n plt.ylabel(\"Count\")\n st.pyplot()\n \ndef display_unique_values(data):\n st.write(\"Unique values:\")\n rows = [{\"Feature\": feature, \"Unique Values\": data[feature].unique()} for feature in data.columns]\n unique_values = pd.DataFrame(rows, columns=[\"Feature\", \"Unique Values\"])\n st.table(unique_values)\n \ndef one_hot_encode(df, cols):\n for col in cols:\n categories = df[col].unique()\n for category in categories:\n new_col_name = col + '_' + str(category)\n df[new_col_name] = np.where(df[col] == category, 1, 0)\n df.drop(columns=[col], inplace=True)\n \ndef check_unique_values(df):\n unique_cols = []\n for col in df.columns:\n if df[col].nunique() == df[col].count():\n unique_cols.append(col)\n if len(unique_cols) > 0:\n st.warning(\"The following columns contain only unique values and will be removed:\")\n st.write(unique_cols)\n df.drop(columns=unique_cols, inplace=True)\n else:\n st.success(\"There are no columns that contain only unique values.\")\n \n \ndef check_val(df,col,val):\n miss = df[df[col]==val].index.to_list()\n st.write(len(miss))\n if (len(miss) <= 0):\n st.success(\"No value found\")\n else:\n st.warning(\"Values found\")\n st.write(df[df[col]==val])\n df[df[col]==val] = np.nan\n \n \n\ndef main():\n st.set_option('deprecation.showPyplotGlobalUse', False)\n st.title(\"Machine Learning Made Easy\")\n\n # file upload widget\n file = st.file_uploader(\"Upload file\", type=[\"csv\", \"xlsx\"])\n X = []\n Y = []\n target=None\n # if user uploads a file\n if file is not None:\n # load data\n df = load_data(file)\n # display data\n display_data(df)\n\n # check for duplicate columns\n if st.button(\"Check for Duplicate Columns\"):\n check_duplicate_cols(df)\n #display_data(df)\n \n\n # check for columns with a single value\n if st.button(\"Check for Single Value Columns\"):\n check_single_value_cols(df)\n #display_data(df)\n \n if st.button(\"Check for Unique Columns\"):\n check_unique_values(df)\n\n # check for duplicate rows\n if 
st.button(\"Check for Duplicate Rows\"):\n check_duplicate_rows(df)\n #display_data(df)\n\n # check for missing values\n if st.button(\"Check for Missing Values\"):\n missing_values=display_missing_values(df)\n if len(missing_values) > 0:\n df.dropna(inplace=True)\n st.success(\"Data with missing values removed\")\n display_data(df)\n \n if st.button(\"Show Summary\"):\n show_summary_statistics(df)\n \n if st.button(\"show all unique values\"):\n display_unique_values(df)\n \n \n col = st.selectbox(\"Select a column:\", df.columns)\n val = st.selectbox(\"Select a value:\", df[col].unique().tolist())\n if st.button(\"remove particular value\"):\n check_val(df,col,val)\n \n \n if st.button(\"Encode binary column\"):\n encode_binary_column(df)\n display_data(df)\n \n if st.button(\"Dtype\"):\n st.write(df.dtypes)\n \n if st.button('Display'):\n display_data(df)\n \n \n \n feature_to_encode = st.selectbox(\"Select a categorical feature to encode:\", df.select_dtypes(include=\"object\").columns)\n if st.button(\"Encode categorical column\"):\n #one_hot_encode(df, feature_to_encode)\n df[feature_to_encode] = le.fit_transform(df[feature_to_encode])\n st.success(\"encoded.\")\n \n numerical_columns = df.select_dtypes(include=[\"float64\", \"int64\"]).columns.tolist()\n column = st.selectbox(\"Select a numerical variable to plot the distribution:\", numerical_columns)\n plot_distribution(df, column)\n \n \n delete_col = st.selectbox(\"Select a column to delete:\", df.columns)\n # Add a button to delete the selected column\n if st.button(\"Delete Column\"):\n df.drop(columns=[delete_col], inplace=True)\n st.success(f\"Column {delete_col} has been deleted.\")\n \n problem = st.selectbox(\"What problem are you trying to solve:\", list(['regression','classification']))\n if problem == 'classification':\n \n # select the target column to label-encode\n target_col = st.selectbox(\"Select a target data:\", df.columns)\n if st.button(\"label encode\"):\n df[target_col] = le.fit_transform(df[target_col])\n dump(le, open('enc.pkl','wb'))\n st.success(\"encoded.\") \n \n title = st.text_input('Enter the file name to save')\n if st.button('save files'):\n if title:\n try:\n \n df.to_csv(title, index=None)\n st.success(f\"File saved successfully as {title}\")\n except FileNotFoundError:\n st.error(\"Invalid file path. Please enter a valid path.\")\n except Exception as e:\n st.error(f\"An error occurred while saving the file: {e}\")\n \n\n \n\nif __name__ == \"__main__\": \n main()\n\n","repo_name":"MMj4beer/Machine-learning-made-easy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"3163097857","text":"from . 
import register, ObjectHolder, handler\nfrom ..client import events\nfrom ..models.channel import TextChannel, VoiceChannel\n\n\n@register('channels')\nclass ChannelStore(ObjectHolder):\n @handler(events.READY)\n def handle_ready(self, ready_packet):\n for guild in ready_packet['guilds']:\n for channel in guild['channels']:\n channel['guild_id'] = guild['id']\n self.upsert(channel)\n\n @handler(events.CHANNEL_CREATE, events.CHANNEL_UPDATE)\n def handle_channel_create_or_update(self, channel):\n self.upsert(channel)\n\n @handler(events.GUILD_CREATE)\n def handle_guild_create(self, guild):\n for channel in guild['channels']:\n channel['guild_id'] = guild['id']\n self.upsert(channel)\n\n @handler(events.GUILD_DELETE)\n def handle_guild_delete(self, guild):\n guild_id = guild['id']\n for channel_id, channel in list(self.items()):\n if channel.guild_id == guild_id:\n self.delete(channel.id)\n\n @handler(events.CHANNEL_DELETE)\n def handle_channel_delete(self, channel):\n self.delete(channel)\n\n def make_object(self, data):\n type = data['type']\n id = data['id']\n guild_id = data['guild_id']\n\n if type == 'text':\n return TextChannel(self._stores, id, guild_id)\n\n elif type == 'voice':\n return VoiceChannel(self._stores, id, guild_id)","repo_name":"jhgg/dissonance","sub_path":"dissonance/stores/channels.py","file_name":"channels.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
+{"seq_id":"39864322766","text":"\"\"\"\nDavid Chang\nStudent ID: 1487883\nCSC 594 Homework #3\n\"\"\"\n\nimport sys\nimport math\nimport numpy as np\n\ntagCounts = {}\nwordTagCounts = {}\ntagTagCounts = {}\n\n\ndef addToTagCounts(tag):\n \"\"\" Add to tag counts dictionary \"\"\"\n if tag in tagCounts:\n tagCounts[tag] = tagCounts[tag] + 1\n else:\n tagCounts[tag] = 1\n\n\ndef addToWordTagCounts(wordTag):\n \"\"\" Add to word tag counts dictionary \"\"\"\n if wordTag in wordTagCounts:\n wordTagCounts[wordTag] = wordTagCounts[wordTag] + 1\n else:\n wordTagCounts[wordTag] = 1\n\n\ndef addToTagTagCounts(tagTag):\n \"\"\" Add to tag to tag transition count dictionary \"\"\"\n if tagTag in tagTagCounts:\n tagTagCounts[tagTag] = tagTagCounts[tagTag] + 1\n else:\n tagTagCounts[tagTag] = 1\n\n\ndef parseTrainingText(filename):\n \"\"\"\n Read in training file line by line and count how\n many times tags, words associated with tags, and tag\n transitions occur in the text\n \"\"\"\n prevTag = \"\"\n with open(filename) as f:\n for line in f:\n line = line.strip()\n if line != \"\":\n tup = line.split()\n word = tup[0]\n tag = tup[1]\n wordTag = (word, tag)\n\n if prevTag == \"\":\n # Start of sentence\n addToTagCounts('start')\n addToTagTagCounts(('start', tag))\n else:\n addToTagTagCounts((prevTag, tag))\n\n addToTagCounts(tag)\n addToWordTagCounts(wordTag)\n prevTag = tag\n else:\n # End of sentence\n prevTag = \"\"\n\n\n# Read and parse training text\nfilename = \"WSJ-train.txt\" if len(sys.argv) == 1 else sys.argv[1]\nparseTrainingText(filename)\n\n# Calculate probabilities of word given a tag\nwordTagProbs = {}\nfor wordTag, count in wordTagCounts.items():\n tag = wordTag[1]\n wordTagProbs[wordTag] = math.log(count / tagCounts[tag])\n\n# Calculate probabilities of tag to tag transitions\ntagTagProbs = {}\nfor tagTag, count in tagTagCounts.items():\n firstTag = tagTag[0]\n tagTagProbs[tagTag] = math.log(count / tagCounts[firstTag])\n\n# Read in the full test file\ntestFilePath = \"WSJ-test.txt\" if len(sys.argv) == 1 else 
sys.argv[2]\n#testFilePath = \"test.txt\" if len(sys.argv) == 1 else sys.argv[2]\nTEST_FILE = open(testFilePath, 'r')\nTEST_TEXT = TEST_FILE.read().strip()\nTEST_FILE.close()\nsentences = TEST_TEXT.split('\\n\\n')\n\noverallTotal = 0\noverallCorrect = 0\n\n# For each sentence, use the HMM we created from the probabilites\n# and apply the viterbi algorithm to calculate the most likely\n# states for each word in the sentence.\nfor sentence in sentences:\n lines = sentence.strip().split('\\n')\n words = []\n tags = []\n for line in lines:\n words.append(line.split()[0])\n tags.append(line.split()[1])\n x_length = len(words)\n y_length = len(tagCounts)\n\n # create viterbi matrix\n probMatrix = [[0 for x in range(x_length)] for y in range(y_length)]\n maxPrevIndMatrix = [[0 for x in range(x_length)] for y in range(y_length)]\n\n # fill in the first column with probabilities\n tagArr = list(tagCounts.keys())\n for i in range(len(tagArr)):\n tag = tagArr[i]\n currTuple = (words[0], tag)\n prob = float(\"-inf\")\n\n if currTuple in wordTagProbs and ('start', tag) in tagTagProbs:\n prob = tagTagProbs[('start', tag)] + wordTagProbs[currTuple]\n\n probMatrix[i][0] = prob\n\n for wordInd in range(1, len(words)):\n for currTagInd in range(len(tagArr)):\n currTup = (words[wordInd], tagArr[currTagInd])\n\n maxProbKy = -1\n maxProbVal = None\n for priorTagInd in range(len(tagArr)):\n tagTup = (tagArr[priorTagInd], tagArr[currTagInd])\n prob = float(\"-inf\")\n\n if currTup in wordTagProbs and tagTup in tagTagProbs:\n p = tagTagProbs[tagTup] + wordTagProbs[currTup]\n prob = p + probMatrix[priorTagInd][wordInd-1]\n\n if (maxProbVal == None) or (math.exp(prob) > math.exp(maxProbVal)):\n maxProbVal = prob\n maxProbKy = priorTagInd\n\n maxPrevIndMatrix[currTagInd][wordInd] = maxProbKy\n probMatrix[currTagInd][wordInd] = maxProbVal\n\n a = np.array(probMatrix)\n maxIndLastCol = np.argmax(a[:,x_length-1])\n t = [tagArr[maxIndLastCol]]\n for x in range(x_length-1, 0, -1):\n maxIndLastCol = maxPrevIndMatrix[maxIndLastCol][x]\n t.insert(0, tagArr[maxIndLastCol])\n\n for y in range(len(t)):\n if t[y] == tags[y]:\n overallCorrect += 1\n overallTotal += 1\n\naccuracy = overallCorrect / overallTotal\nprint(accuracy)\n\n\n\n\n","repo_name":"jaredvonhalle/CSC594-HMM","sub_path":"hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74833897865","text":"from _pytest.pytester import Pytester\nfrom docker.client import DockerClient\nfrom docker.errors import ImageNotFound\nimport pytest\n\n\ndef test_tag_stages(request, pytester: Pytester, docker_client: DockerClient):\n builder_tag = \"localhost/pytest-docker-tools/buildtest:builder\"\n latest_tag = \"localhost/pytest-docker-tools/buildtest:latest\"\n\n def _cleanup():\n for tag in (builder_tag, latest_tag):\n try:\n docker_client.images.remove(tag)\n except ImageNotFound:\n return\n\n request.addfinalizer(_cleanup)\n\n with pytest.raises(ImageNotFound):\n for tag in (builder_tag, latest_tag):\n docker_client.images.get(tag)\n\n # A fake multi stage build.\n pytester.makefile(\n \"\",\n Dockerfile=\"\\n\".join(\n (\n \"FROM alpine:3.13 AS builder\",\n \"RUN touch /hello-intermediate-step\",\n \"RUN touch /hello\",\n \"FROM alpine:3.13\",\n \"COPY --from=builder /hello /hello\",\n )\n ),\n )\n\n pytester.makeconftest(\n \"\\n\".join(\n (\n \"from pytest_docker_tools import build\",\n \"myimage = build(\",\n \" path='.',\",\n f\" 
tag='{latest_tag}',\",\n f\" stages={{'builder': '{builder_tag}'}},\",\n \")\",\n )\n )\n )\n\n pytester.makepyfile(\n test_reusable_network=\"\\n\".join(\n (\n \"def test_session_1(myimage):\",\n f\" assert '{latest_tag}' in myimage.tags\",\n )\n )\n )\n\n result = pytester.runpytest()\n result.assert_outcomes(passed=1)\n\n latest = docker_client.images.get(latest_tag)\n assert latest is not None\n\n builder = docker_client.images.get(builder_tag)\n assert builder is not None\n\n assert latest.id != builder.id\n","repo_name":"Jc2k/pytest-docker-tools","sub_path":"tests/integration/test_build.py","file_name":"test_build.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"81"}
+{"seq_id":"11326755586","text":"import argparse\nimport configparser\nimport logging as log\nimport os\nimport sys\n\nfrom flask import Flask\nfrom pytesseract import pytesseract\nfrom waitress import serve\n\nCONFIG_DEV_FILE = 'src/resources/config-dev.ini'\nCONFIG_PROD_FILE = 'src/resources/config-prod.ini'\n\n\ndef configure_logger(level=log.INFO):\n log.basicConfig()\n log.getLogger().setLevel(level)\n\n\ndef load_command_line_arguments():\n log.debug('Number of arguments: %d', len(sys.argv))\n log.debug('Argument list: %s', str(sys.argv))\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--dev\", action=\"store_true\", help=\"run in dev mode\")\n parser.add_argument(\"-p\", \"--prod\", action=\"store_true\", help=\"run in prod mode\")\n args = parser.parse_args()\n\n if args.dev and args.prod:\n log.warning('Cannot run dev and prod mode simultaneously. Running dev mode instead.')\n args.dev = True\n args.prod = False\n\n if not args.dev and not args.prod:\n args.dev = True\n\n return args\n\n\ndef load_environment(args):\n if args.dev:\n config_file = CONFIG_DEV_FILE\n elif args.prod:\n config_file = CONFIG_PROD_FILE\n else:\n raise EnvironmentError('Running mode unknown. 
Define either dev or prod.')\n\n os.environ['PUZZLE_SOLVER_CONFIG_FILE'] = config_file\n if not os.path.exists(config_file):\n log.error(f'{config_file} does not exist')\n config = configparser.ConfigParser()\n config.read(config_file)\n log.debug(config.items())\n if 'general' not in config:\n log.error('Config file is not properly configured')\n return config\n\n\ndef load_tesseract(args):\n if args.dev:\n pytesseract.tesseract_cmd = config['tesseract']['executable'] # needed if not running inside docker\n\n\ndef create_app():\n # Load app modules locally to ensure that the configuration and environment variables are set up\n from api.controllers import grid_game_controller\n\n app = Flask(__name__)\n\n app.register_blueprint(grid_game_controller)\n\n return app\n\n\nif __name__ == '__main__':\n args = load_command_line_arguments()\n config = load_environment(args)\n load_tesseract(args)\n\n debug = False\n log_level = config['general'].getint('log_level')\n if log_level == log.DEBUG:\n debug = True\n\n configure_logger(log_level)\n\n if args.dev:\n log.debug('Running in development mode')\n elif args.prod:\n log.debug('Running in production mode')\n\n log.info(\"Start Server...\")\n app = create_app()\n\n ip_address = config['flask']['ip_address']\n port = config['flask'].getint('port')\n\n if args.prod:\n serve(app, host=ip_address, port=port)\n else:\n app.run(host=ip_address, port=port, debug=debug)\n","repo_name":"niklastanner/puzzle-solver","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5666648499","text":"# Raspberry Pi remote control project - server program\n\nimport RPi.GPIO as GPIO \t# import the library that controls the Raspberry Pi's pins\nimport time\n\nled1=18\nled2=23\n\nGPIO.setmode(GPIO.BCM) \t# set the pin numbering scheme (BCM)\nGPIO.cleanup()\t\t\t# clear any pin settings left over from before\nGPIO.setup(led1,GPIO.OUT)\t\t# configure the pin LED 1 is connected to as an output\n\nfor x in range(1,5): \n\tGPIO.output(led1,GPIO.HIGH) # turn the LED on\n\ttime.sleep(1)\t\t\t\t\t\t\t# wait 1 second \n\tGPIO.output(led1,GPIO.LOW) # turn the LED off\n\ttime.sleep(1)\t\t\t\t\t\t\t# wait 1 second\n","repo_name":"yutasrobot/RaspberryHome","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"38320467115","text":"from uuid import uuid4\nfrom datetime import date, timedelta, datetime\nfrom random import random, randint\n__author__ = 'lucas'\n\n\nlog_ex = \"177.126.180.83 - - [DATA] \\\"GET /meme.jpg \" \\\n \"HTTP/1.1\\\" 200 2148 \\\"-\\\"userid=USERID\\\"\"\n\nif __name__ == '__main__':\n\n\n lista_cokies = []\n\n data = datetime.now()\n for i in range(100):\n lista_cokies.append(str(uuid4()))\n for i in range(1,5):\n file = open(\"log\"+str(i)+\".txt\",\"w\")\n lista_logs = []\n for i in range(1000):\n aux = randint(0, 99)\n data = data + timedelta(seconds=1)\n log = log_ex.replace(\"DATA\",data.strftime(\"%d/%b/%Y:%H:%M:%S\") + \" -0300\").replace(\"USERID\",lista_cokies[aux])\n lista_logs.append(log)\n lista_logs.append(\"\\n\")\n\n file.writelines(lista_logs)\n 
file.close()\n","repo_name":"lffsantos/processa_log","sub_path":"generate_fake_log.py","file_name":"generate_fake_log.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31686039333","text":"\n# coding=utf-8\n#! /usr/bin/python\nimport sys\nimport json\nimport csv\nimport io\nimport pandas as pd\nimport numpy as np\n\nLOCATION_TEMP_FOLDER = 'temps'\nclass ServiceUtils():\n def __init__(self):\n print(\"ServiceUtils\")\n def writeFileDataJSON(self,data_filter, file_name):\n print(\"escribiendo archivo\")\n FILE_LOCATION_ALL = (\"%s/%s.json\" % (LOCATION_TEMP_FOLDER,file_name))\n with open(FILE_LOCATION_ALL, 'w') as filehandle: \n filehandle.write('[\\n')\n count = 0\n for listitem in data_filter:\n if count < len(data_filter)-1:\n filehandle.write('%s,\\n' % listitem)\n \n else:\n filehandle.write('%s\\n' % listitem)\n count +=1\n filehandle.write('\\n]')\n def writeFileJSON(self,data,file_name):\n print(\"escribiendo archivo\")\n FILE_LOCATION_ALL = (\"%s/%s.json\" % (LOCATION_TEMP_FOLDER,file_name))\n with open(FILE_LOCATION_ALL, 'w', encoding='utf-8') as f:\n json.dump(data, f, ensure_ascii=False, indent=4)\n\n def openJSONFile(self,file_name):\n print(\"abriendo archivo\")\n FILE_LOCATION_ALL = (\"%s/%s.json\" % (LOCATION_TEMP_FOLDER,file_name))\n with open(FILE_LOCATION_ALL) as f:\n data_load = json.load(f)\n return data_load\n def writeCSV(self,data,file_name):\n np.savetxt(file_name, data, delimiter =\", \", fmt ='% s')\n def saveDataFrame(self,dataFrame,file_name):\n df = pd.DataFrame(dataFrame)\n df.to_csv()\n","repo_name":"ccum/SO-CAL","sub_path":"Source_Code/text_preprocessing/ServiceUtils.py","file_name":"ServiceUtils.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"3860706231","text":"from manimlib.imports import *\nimport os\nimport pyclbr\n\nclass SJF_NPE(ThreeDScene):\n \n def construct(self):\n\n heading=TextMobject(\"SJF NPE \")\n heading.to_edge(UP+LEFT)\n self.play(Write(heading),run_time=2)\n\n \n p,seq=sim()\n self.animate(heading,seq,q=None)\n\n def animate(self,heading,seq,q):\n\n time=0\n text1=[]\n text2=[]\n clk=TextMobject(\"time ={}\".format(str(time)))\n clk.next_to(heading,DOWN,buff=0.5)\n clk.scale(0.8)\n self.add(clk)\n self.play(Write(clk))\n # r_text=TexMobject(\"Ready queue\")\n # r_text.to_corner(RIGHT+UP)\n # r_text.scale(0.5)\n\n #rq=q\n \n for i in range(len(seq)):\n if(seq[i][0]==-1):\n text1.append(TextMobject(\"E\"))\n else:\n text1.append(TextMobject(\"P{}\".format(str(seq[i][0]))))\n text2.append(TextMobject(str(seq[i][1])))\n \n \n for i in range(len(seq)):\n if i==0:\n text1[i].to_edge(LEFT)\n text2[i].next_to(text1[i],DOWN,buff=0.8)\n else:\n text1[i].next_to(text1[i-1],RIGHT,buff=0.8)\n text2[i].next_to(text1[i],DOWN,buff=0.8)\n \n counter=0\n while(counter=process[j][1]):\n\t\t\t\tif(low>process[j][2]):\n\t\t\t\t\tlow=process[j][2]\n\t\t\t\t\tval=j\n\t\t\t\telif(low==process[j][2]):\n\t\t\t\t\tif(process[val][1]>process[j][1]):\n\t\t\t\t\t\tlow=process[j][2]\n\t\t\t\t\t\tval=j\t\t\t\t\t\n\t\t\t\n\t\t\tj=j+1\n\t\t\n\t\tx=process[val][1]-completion_time\n\t\tif(x<=0):\n\t\t\tx=0\t\n\t\t\n\t\tprocess[val][3]=temp+process[val][2]+x #Completion Time = Arrival Time + Burst Time\n\t\tcompletion_time=process[val][3]\n\t\tprocess[val][5]=process[val][3]-process[val][1] #Turnaround Time = Completion Time - Arrival 
Time\n\t\tprocess[val][4]=process[val][5]-process[val][2] #Waiting Time = Turnaround Time - Burst Time\n\t\t\n\t\tswap(process,val,i)\n\t\ti=i+1\n\n\ndef SJF (process,num):\n\tprint(\"\\nBefore Arrange...\\n\")\n\tprint(\"Process ID\\tArrival Time\\tBurst Time\\n\")\n\t\n\tfor i in range(num):\n\t\tprint(process[i][0],\"\\t\\t\",process[i][1],\"\\t\\t\",process[i][2])\n\t\n\tarrange_arrival_time(process,num)\n\tarrange_completion_time(process,num)\n\tcompletion_time=0\n\tprint(\"\\nAfter Arrange...\\n\")\n\tprint(\"Process ID\\tCompletion Time\\n\")\n\t\n\ti=0\n\tseq=[]\n\twhile(i (4, 5) == 1\n\nNotes\n- Got brute force right away, O(n*m).\n- Jumped to sort both, then binary search one.\n- Can actually just sort one of them (smaller).\n\"\"\"\nimport bisect\nimport sys\n\n\ndef smallest_diff(a, b):\n a = sorted(a)\n min_pair, diff = None, sys.maxsize\n for val in b:\n i = bisect.bisect_left(a, val)\n for j in (i-1, i, i+1):\n if 0 <= j < len(a):\n if abs(a[j] - val) < diff:\n min_pair = (a[j], val)\n diff = abs(a[j] - val)\n return min_pair, diff\n\n\nif __name__ == '__main__':\n print(smallest_diff([1, 4, 10], [-1, 5, 12]))\n","repo_name":"chase-seibert/cracking-the-coding-interview","sub_path":"ctci-16-06-smallest-diff.py","file_name":"ctci-16-06-smallest-diff.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"26040926465","text":"import PySide2\r\nfrom PySide2 import QtCore\r\nfrom PySide2.QtGui import QGuiApplication, Qt, QColor\r\nfrom PySide2.QtWidgets import QWidget, QApplication, QVBoxLayout, QHBoxLayout, QColorDialog\r\nfrom PySide2.QtUiTools import QUiLoader\r\nimport json\r\n\r\n\r\nclass SettingWidget(QWidget):\r\n setting_changed = QtCore.Signal()\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self._fontcolor = None # QColor(0, 0, 0)\r\n self._color = None # QColor(250, 235, 215) # 古董白\r\n self._fontsize = None # 15\r\n self._font = None # '楷体'\r\n self._height = None # 121\r\n self._width = None # 260\r\n self._opacity = None # 0.7\r\n self.load_setting_from_json()\r\n self.ui = QUiLoader().load('setting.ui')\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.ui.setWindowFlags(Qt.WindowStaysOnTopHint)\r\n self.ui.ok_btn.clicked.connect(self.update_setting_data)\r\n self.ui.color_btn.clicked.connect(self.choose_color)\r\n self.ui.font_color_btn.clicked.connect(self.choose_font_color)\r\n self.ui.reset_btn.clicked.connect(self.reset_setting_data)\r\n self.ui.cancel_btn.clicked.connect(self.ui.close)\r\n self.ui.opacity_value.valueChanged.connect(self._on_opacity_slider_changed)\r\n ...\r\n\r\n def load_setting_from_json(self, key='default'):\r\n with open('setting.json', 'r', encoding='utf8') as f:\r\n d = json.load(f)[key]\r\n self._color = QColor(*d['color'])\r\n self._fontsize = d['fontsize']\r\n self._font = d['font']\r\n self._height = d['height']\r\n self._width = d['width']\r\n self._opacity = d['opacity']\r\n self._fontcolor = QColor(*d['fontcolor'])\r\n self.setting_changed.emit()\r\n\r\n def _on_opacity_slider_changed(self, v):\r\n self._opacity = v / 100\r\n self.ui.label.setText(f'透明度({v / 100:.2f})')\r\n self.setting_changed.emit()\r\n\r\n def choose_color(self):\r\n color_dialog = QColorDialog()\r\n color_dialog.setWindowFlags(Qt.WindowStaysOnTopHint)\r\n color_dialog.exec_()\r\n self._color = color_dialog.selectedColor()\r\n self.setting_changed.emit()\r\n\r\n def choose_font_color(self):\r\n color_dialog = QColorDialog()\r\n 
color_dialog.setWindowFlags(Qt.WindowStaysOnTopHint)\r\n color_dialog.exec_()\r\n self._fontcolor = color_dialog.selectedColor()\r\n self.setting_changed.emit()\r\n\r\n def get_opacity(self):\r\n return self._opacity\r\n\r\n def set_opacity(self, op):\r\n self._opacity = op\r\n self.ui.opacity_value.setValue(op * 100)\r\n\r\n def get_width(self):\r\n return self._width\r\n\r\n def set_width(self, w):\r\n self._width = w\r\n\r\n def get_height(self):\r\n return self._height\r\n\r\n def set_height(self, h):\r\n self._height = h\r\n\r\n def get_font(self):\r\n return self._font\r\n\r\n def get_fontsize(self):\r\n return self._fontsize\r\n\r\n def get_color(self) -> QColor:\r\n return self._color\r\n\r\n def set_color(self, c: QColor):\r\n self._color = c\r\n\r\n def get_font_color(self) -> QColor:\r\n return self._fontcolor\r\n\r\n def set_font_color(self, c: QColor):\r\n self._fontcolor = c\r\n\r\n def update_setting_data(self):\r\n self._opacity = self.ui.opacity_value.value() / 100\r\n self._width = self.ui.width_value.value()\r\n self._height = self.ui.height_value.value()\r\n self._font = self.ui.font_text.currentText()\r\n self._fontsize = self.ui.fontsize_value.value()\r\n # self._fontsize is set at the Select-Color button\r\n # self._color is set at the Select-Color button\r\n self.setting_changed.emit()\r\n # print(self._opacity, self._width, self._height, self._font, self._fontsize)\r\n\r\n def reset_setting_data(self):\r\n self._opacity = 0.7\r\n self._width = 260\r\n self._height = 121\r\n self._font = '楷体'\r\n self._fontsize = 15\r\n self._color = QColor(250, 235, 215)\r\n self._fontcolor = QColor(0, 0, 0)\r\n self.ui.opacity_value.setValue(70)\r\n self.ui.width_value.setValue(260)\r\n self.ui.height_value.setValue(121)\r\n self.ui.font_text.setCurrentFont('楷体')\r\n self.ui.fontsize_value.setValue(15)\r\n self.setting_changed.emit()\r\n\r\n def closeEvent(self, event: PySide2.QtGui.QCloseEvent) -> None:\r\n event.ignore()\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication([])\r\n # setting = SettingWidget()\r\n # setting.ui.show()\r\n from my_set import Ui_Setting\r\n\r\n test = QWidget()\r\n test.ui = Ui_Setting()\r\n test.ui.setupUi(test)\r\n test.show()\r\n print(test.get_setting_data())\r\n app.exec_()\r\n","repo_name":"Quiser714/Desktop-Floating-Window","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73699513865","text":"# First we import the module panda as pd\r\nimport pandas as pd\r\nimport re\r\n\r\ndf = pd.read_excel(r'C:\\Users\\JOT1WE\\Desktop\\PythonTest\\extra\\complete.xlsx')\r\n\r\ndf.sort_values(['Country'], ascending=True, inplace=True) # Sort in ascending order according to countries\r\n\r\nresult_df1=df.loc[~df['Country'].str.contains(',', na=False)] # Removes raws where column 'Country' has a comma (when there is comma, there is more than one country)\r\n\r\nmask=df['Country Association'].str.len() > 1 # If there is an entry in column 'Country Association'. The length of that element should be greater than '1'. mask stores a 'True' or 'False' value. 
\r\n\r\nresult_df2 = df.loc[mask] # A new dataframe 'result_df2' keeps only the rows where the mask is 'True'.\r\n\r\nresult_df3=df.loc[(df['Country Association'].str.len() == 1) & (df['Country'].str.contains(',', na=False))] # Select rows with no entry in column 'Country Association' (somehow, even though there is no value in 'Country Association', the length is '1' instead of zero) that have a comma in column 'Country'. Both comparisons must be parenthesised because '&' binds tighter than '=='.\r\nresult_df3.reset_index(drop=True, inplace=True)\r\nlst1=['India', 'Belgium', 'Belarus','Czechia'] #Applicable countries\r\nlst2=[]\r\ntmplst1=[]\r\ntmplst2=[]\r\ni=0\r\n\r\nfor cell in result_df3['Country']:\r\n    i=i+1\r\n    lst2=cell.split(',')\r\n    for item in lst2:\r\n        if str(item) in lst1:\r\n            tmplst1.append(str(item)) # Append the country name to the list 'tmplst1'\r\n            tmplst2.append(str(result_df3.iloc[i-1,0])) # Append the ID to the list 'tmplst2'; 'i-1' because 'i' was already incremented for this row\r\ndflst1=pd.DataFrame(tmplst1, columns = ['Country'])\r\ndflst2=pd.DataFrame(tmplst2, columns = ['ID'])\r\nframes = [dflst1, dflst2]\r\nresult_df4 = pd.concat(frames,axis=1)\r\nresult_df4.sort_values(['Country'], ascending=True, inplace=True)\r\n\r\ntempdf=result_df4.drop_duplicates(subset=['ID'])\r\nk=0\r\nresult_df4.reset_index(drop=True, inplace=True)\r\ntempdf.reset_index(drop=True, inplace=True)\r\n\r\nprint(tempdf['ID']) #print unique IDs\r\n\r\nfor item1 in tempdf['ID']: # This dataframe 'tempdf' has column 'ID' without duplicates\r\n    print('Checking for ID', item1)\r\n    k=0\r\n    for item2 in result_df4['ID']: # This dataframe 'result_df4' has column 'ID' with duplicates\r\n        k=k+1\r\n        if str(item1)==str(item2): # compare the 'ID' columns of 'tempdf' and 'result_df4' to print out countries sharing the same regulation ID.\r\n            print('Country:',result_df4.iloc[k-1,0])\r\n\r\nresult_df1.to_excel('Countries.xlsx', index=False)\r\nresult_df2.to_excel('With_Country_Association.xlsx', index=False)\r\nresult_df3.to_excel('Countries_with_more_than_one_country.xlsx', index=False)\r\nresult_df4.to_excel('Countries_with_more_than_one_country-sorted.xlsx', index=False)","repo_name":"tomjpalamattam/Python","sub_path":"Projects/My-Work-At-Bosch/other-scripts/script7.py","file_name":"script7.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"41280849779","text":"from django.db import models\n\n\nclass Category(models.Model):\n    name = models.CharField(\n        max_length=200,\n        unique=True,\n    )\n\n\nclass Author(models.Model):\n    name = models.CharField(\n        max_length=200,\n        unique=True,\n    )\n\n\nclass Book(models.Model):\n    book_id = models.CharField(\n        max_length=12,\n        unique=True,\n    )\n    title = models.CharField(\n        max_length=200,\n    )\n    authors = models.ManyToManyField(\n        Author,\n    )\n    published_date = models.DateField()\n    # If published_date contains only the year this is False; if it is a full date, True\n    exact_date = models.BooleanField(\n        default=False,\n    )\n    categories = models.ManyToManyField(\n        Category,\n    )\n    average_rating = models.FloatField(\n        null=True,\n    )\n    ratings_count = models.PositiveIntegerField(\n        null=True,\n    )\n    thumbnail = models.URLField(\n        max_length=500,\n    )\n    created_date = models.DateTimeField(\n        auto_now_add=True,\n    )\n    modified_date = models.DateTimeField(\n        auto_now=True,\n    )\n\n    @staticmethod\n    def get_ids_which_already_exists(ids):\n        return Book.objects.filter(book_id__in=ids).values_list('book_id', 
flat=True)\n\n","repo_name":"mtolkacz/bookject","sub_path":"app/bookject/books/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18228009773","text":"from django.shortcuts import render,get_object_or_404\nfrom .serializers import MemoSerializer\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework import status\nfrom .models import Memo\n\n# Create your views here.\n@api_view(['GET', 'POST'])\ndef apiMemo (request):\n if request.method == 'GET':\n memos = Memo.objects.all()\n serializer = MemoSerializer(memos,many=True)\n return Response(serializer.data)\n elif request.method == 'POST' :\n serializer=MemoSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response({\"message\":\"작성되었습니다.\"})\n # elif request.method == 'DELETE':\n # memo=get_object_or_404(Memo,pk=request.data['id'])\n # memo.delete()\n # return Response({\"message\":\"삭제되었습니다.\"})\n\n@api_view(['DELETE'])\ndef deleteMemo (request,memo_id):\n memo=get_object_or_404(Memo,pk=memo_id)\n memo.delete()\n return Response({\"message\":\"삭제되었습니다.\"})","repo_name":"HorangApple/TIL","sub_path":"JavaScript/수업/day10/REST/memo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13107592076","text":"# -*- codeing = utf-8 -*-\r\n# @Time : 2021/6/23 上午 3:48\r\n# @Author : 漫天烟华\r\n# @File : 数据提取.py\r\n# @Software : PyCharm\r\n\r\n\r\n# 数据类型\r\n# ----------------------\r\n# 按响应分类(结构化、非结构化)\r\n# ----------------------\r\n# 结构化数据\r\n# 1.json数据(常见),提取���据方式:\r\n# json模块\r\n# re模块\r\n# jsonpath模块\r\n# 2.xml数据(少见),提取数据方式:\r\n# re模块\r\n# lxml模块\r\n# ----------------------\r\n# 非结构化数据\r\n# html字符串,提取数据方式:\r\n# re模块\r\n# lxml模块 xpath语法\r\n# beautifulsoup xpath、正则、css选择器\r\n# pyquery css选择器\r\n# -----------------------------------------------------------------------------------------------------------\r\n# jsonpath 模块\r\n\r\n# 在多层嵌套的复杂字典中,想要根据key和下标来批量提取value,是非常困难的且不高效的\r\n# jsonpath 可以帮助我们用更高效简洁的方法实现目的\r\n\r\n# 常用语法\r\n# $ 根节点(最外层的大括号)\r\n# . 子节点\r\n# .. 子孙节点(可以跳级,类似xpath的//)\r\n#\r\n\r\nimport requests\r\nimport jsonpath\r\n\r\n# 拉勾网城市接口\r\n# url = 'https://www.lagou.com/lbs/getAllCitySearchLabels.json'\r\n# headers = {\r\n# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}\r\n# response = requests.get(url, headers=headers)\r\n# jsons = response.json()\r\n#\r\n# city_name = jsonpath.jsonpath(jsons, '$..G..name')\r\n# city_code = jsonpath.jsonpath(jsons, '$..G..code')\r\n# print(city_name)\r\n# print(city_code)\r\n# -----------------------------------------------------\r\n\r\nbookstore = {\r\n \"store\": {\r\n \"book\": [\r\n {\r\n \"category\": \"reference\",\r\n \"author\": \"Nigel Rees\",\r\n \"title\": \"Sayings of the Century\",\r\n \"price\": 8.95\r\n },\r\n {\r\n \"category\": \"fiction\",\r\n \"author\": \"Evelyn Waugh\",\r\n \"title\": \"Sword of Honour\",\r\n \"price\": 12.99\r\n },\r\n {\r\n \"category\": \"fiction\",\r\n \"author\": \"Herman Melville\",\r\n \"title\": \"Moby Dick\",\r\n \"isbn\": \"0-553-21311-3\",\r\n \"price\": 8.99\r\n },\r\n {\r\n \"category\": \"fiction\",\r\n \"author\": \"J. R. R. 
Tolkien\",\r\n \"title\": \"The Lord of the Rings\",\r\n \"isbn\": \"0-395-19395-8\",\r\n \"price\": 22.99\r\n }\r\n ],\r\n \"bicycle\": {\r\n \"color\": \"red\",\r\n \"price\": 19.95\r\n }\r\n },\r\n \"expensive\": 10\r\n}\r\n\r\n# author = jsonpath.jsonpath(bookstore, '$..author')\r\n# print(author) # '$.store.book[*].author' 也可以找出该例子的作者\r\n# price = jsonpath.jsonpath(bookstore, '$..price')\r\n# print(price)\r\n# test = jsonpath.jsonpath(bookstore, '$..book[?(@.price>10)]')\r\n# print(test)\r\n\r\n\r\n# -----------------------------------------------------------------------------------------------------------\r\n# xpath 部分...略\r\n","repo_name":"liaofaz/ro","sub_path":"爬虫/数据提取.py","file_name":"数据提取.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25966746948","text":"import numpy as np\nimport csv\n\nout = csv.writer(open(\"Data/best4linreg.csv\", \"w\"), delimiter=',',quoting=csv.QUOTE_NONE)\n\np = 0.0\nrd = np.random.randint(0, 10, 1)\nrd = rd[0]\n\nfor i in range(1000):\n p += rd\n out.writerow((p, p*2, p*3))\n rd *=1.5\n\n\n","repo_name":"xgu60/CourseProject_ML4T","sub_path":"Build_and_assess_regression_learners/best4linreg.py","file_name":"best4linreg.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35768151993","text":"def sumBelow100(a):\n print('Recursion')\n if len(a) == 0:\n return 0\n s = sumBelow100(a[1:]) # Recursion call only once!\n if a[0] + s > 100:\n return s\n else:\n return a[0] + s\n\na = [10,30,80] # Since Recursion works backward, picking up numbers and evaluation will go in reverse order\nprint(sumBelow100(a))","repo_name":"jimmy1087/codeKatas","sub_path":"dp/exercises12_commonSense/sumBelow100_dp.py","file_name":"sumBelow100_dp.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4410004230","text":"def roll_dice(probability_of_six=1/6):\r\n \"\"\"\r\n Return a random number on a dice based on a given probability of rolling 6.\r\n If the die is rigged, the probability of the remaining rolls is uniform.\r\n \"\"\"\r\n if random() < probability_of_six:\r\n return 6\r\n return randint(1, 5)\r\n\r\n\r\ndef game(board_size, print_state=True, probability_of_six=1/6):\r\n \"\"\"\r\n Play a game that simulates the movement of a piece on a game plan of size\r\n . If is True, print the roll and state of the\r\n piece after each round. Otherwise, return the number of rounds needed to\r\n reach the goal. 
 You can also set the <probability_of_six>.\r\n    \"\"\"\r\n    game_round = 0\r\n    position = 0\r\n    while position < board_size:\r\n        game_round += 1\r\n        if print_state:\r\n            print(\"Round\", game_round, \"-- Roll:\", end=\" \")\r\n\r\n        move_forward = 0\r\n        while move_forward % 6 == 0:\r\n            roll = roll_dice(probability_of_six)\r\n            if print_state:\r\n                print(roll, end=\" \")\r\n            move_forward += roll\r\n            if move_forward == 18:\r\n                # Unlucky roll 666\r\n                position = 0\r\n                move_forward = 0\r\n                break\r\n            if move_forward % 6 == 0 and position + move_forward > board_size:\r\n                # Don't throw again if you are already out of bounds after 6\r\n                break\r\n\r\n        if position + move_forward <= board_size:\r\n            position += move_forward\r\n        if print_state:\r\n            print(\"\\nI am on position\", position)\r\n\r\n    if print_state:\r\n        print(\"Game finished in round\", game_round)\r\n    else:\r\n        return game_round","repo_name":"teaching-lab/stack-cs-teacher-training","sub_path":"activities/code-review/code/code4.py","file_name":"code4.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"12749762894","text":"#Create and overwrite (w)\r\n#To read (r)\r\n#To append more (a)\r\nprint(\"----------\")\r\nfrom pathlib import Path\r\narchivo_1 = Path(\"./archivo1.txt\")\r\nwith archivo_1.open( mode=\"w\") as file_1:\r\n    file_1.write(\"hola\\nsalto de linea-\\n\")\r\n    file_1.write(\"me hola bro\")\r\n\r\n\r\n\r\n#Read (open and read)\r\nwith open(\"./archivo1.txt\", \"r\") as file:\r\n    for i in file:\r\n        print(i)\r\nprint(\"-------------\")\r\n\r\n\r\n#Check if it exists and get the parent path\r\nfrom pathlib import Path\r\narchivo = Path(\"./archivo.text\")\r\nprint(archivo.parent)\r\nprint(\"-----------\")\r\n\r\n\r\n#File name\r\nprint(archivo.name)\r\nprint(\"----------------\")\r\n\r\n\r\n#Get the absolute path\r\nprint(archivo.absolute())\r\nprint(\"----------------------\")\r\n\r\n\r\n#Count lines of a txt file\r\nprint(\"Start\")\r\nx = 0\r\ny = 0\r\n\r\n\r\n#Count lines\r\nwith archivo_1.open( mode=\"r\") as file_1:\r\n    for i in file_1:\r\n        x += 1\r\nprint(x)\r\nprint(\"--------------\")\r\n\r\n\r\n#Count characters\r\nwith open(\"archivo1.txt\", \"r\") as file_1:\r\n    texto = file_1.read()\r\n    texto = texto.replace(\"\\n\", \"\").replace(\" \", \"\")\r\n    print(len(texto))\r\nprint(\"---------------\")\r\n\r\n\r\n#Count words\r\nx = 0\r\nwith open(\"./archivo1.txt\", \"r\") as file_1:\r\n    texto = file_1.read().replace(\"\\n\", \" \")\r\n    texto = texto.split(\" \")\r\n    while \"\" in texto:\r\n        texto.remove(\"\")\r\n    print(len(texto))\r\nprint(\"------------\")\r\n\r\n\r\n#Replace a substring\r\nwith open(\"./archivo1.txt\", \"r\") as archivo:\r\n    texto = archivo.read()\r\n    if \"hola\" in texto:\r\n        texto = texto.replace(\"hola\", \"me\")\r\n        print(texto)\r\nwith open(\"./archivo1.txt\", \"w\") as archivo:\r\n    archivo.write(texto)\r\nprint(\"----------\")\r\n\r\n\r\n#Count how many times a word appears\r\nx = 0\r\nwith open(\"./archivo1.txt\", \"r\") as archivo:\r\n    texto = archivo.read().replace(\"\\n\", \" \")\r\n    texto = texto.split(\" \")\r\n    for i in texto:\r\n        if \"me\" == i:\r\n            x +=1\r\n    print(x)\r\nprint(\"-------------\")\r\n\r\n\r\n#Remove empty lines\r\nwith open(\"./entrada.txt\", \"r\") as archivo, open(\"./salida.txt\", \"w\") as salida:\r\n    for i in archivo:\r\n        if i.strip():\r\n            salida.write(i)\r\nwith open(\"./salida.txt\", \"r\") as salida:\r\n    text = salida.read()\r\n    print(text)
\r\n\r\n\r\nprint(\"------------------------\")\r\n#Sum even numbers\r\nx = 0\r\nwith open(\"./numeros.txt\", \"r\") as file:\r\n    numer = file.read()\r\n    numer = numer.split(\" \")\r\n    for i in numer:\r\n        try:\r\n            i = int(i)\r\n            if i % 2 == 0:\r\n                x += i\r\n        except ValueError:\r\n            print(\"The number entered is not an integer\")\r\n    print(x)\r\n\r\n\r\nprint(\"------------------\")\r\n#Count distinct words\r\nx = []\r\ny = 0\r\nwith open(\"./entrada.txt\", \"r\") as file:\r\n    text = file.read().replace(\"\\n\", \" \")\r\n    text = text.split(\" \")\r\n    while \"\" in text:\r\n        text.remove(\"\")\r\n    for i in text:\r\n        if i not in x:\r\n            x.append(i)\r\n            y += 1\r\n    print(y)\r\nprint(\"---------------\")\r\n\r\n\r\n#Sort words alphabetically\r\nwith open(\"./alfabeticamente.txt\", \"r\") as file:\r\n    text = file.read()\r\n    text = text.replace(\"\\n\", \" \")\r\n    text = text.split(\" \")\r\n    text.sort()\r\n    resultado = \" \".join(text)\r\n    print(resultado)\r\n\r\n#Words from longest to shortest\r\nprint(\"-----------\")\r\npalabras = []\r\nwith open(\"./palabras_largas.txt\", \"r\") as file:\r\n    text = file.read().replace(\"\\n\", \" \")\r\n    text = text.split()\r\n    final = sorted(text, key=lambda x:len(x), reverse=True)\r\n    print(final)\r\n","repo_name":"KendrydRodriguez1/Estudiar_examen","sub_path":"Examen/archivos.py","file_name":"archivos.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73671126026","text":"import importlib\nimport os\nfrom pathlib import Path\nimport tempfile\nfrom typing import Union\n\nfrom arcgis.features import GeoAccessor, FeatureSet\nfrom arcgis.geometry import Geometry\nfrom arcgis.gis import GIS\nimport pandas as pd\n\nfrom . import utils\nfrom .country import Country\n\narcpy_avail = True if importlib.util.find_spec(\"arcpy\") else False\n\nif arcpy_avail:\n    import arcpy\n    from ._registry import get_ba_key_value\n\n    # ensure previous runs do not interfere\n    arcpy.env.overwriteOutput = True\n\n# location to store temp files if necessary\ncsv_file_prefix = 'temp_closest'\ntemp_file_root = os.path.join(tempfile.gettempdir(), csv_file_prefix)\n\ndef _prep_sdf_for_nearest(input_dataframe: pd.DataFrame, id_column: str):\n    \"\"\"\n    Given an input Spatially Enabled Dataframe, prepare it to work\n    well with the nearest solver.\n\n    Args:\n        input_dataframe: Spatially Enabled Dataframe with really\n            any geometry.\n        id_column: Field uniquely identifying each location to\n            be used for routing to nearest.\n\n    Returns: Spatially Enabled Dataframe of points with correct\n        columns for routing to nearest.\n    \"\"\"\n    # check inputs\n    assert isinstance(input_dataframe, pd.DataFrame), f'The input dataframe must be a Pandas DataFrame, not ' \\\n                                                      f'{type(input_dataframe)}.'\n\n    # ensure the geometry is set\n    geom_col_lst = [c for c in input_dataframe.columns if input_dataframe[c].dtype.name.lower() == 'geometry']\n    assert len(geom_col_lst) > 0, 'The DataFrame does not appear to have a geometry column defined. 
This can be ' \\\n 'accomplished using the \"df.spatial.set_geometry\" method.'\n geom_col = geom_col_lst[0]\n\n # ensure the column is in the dataframe columns\n assert id_column in input_dataframe.columns, f'The provided id_column, \"{id_column},\" does not appear to be in ' \\\n f'the columns [{\", \".join(input_dataframe.columns)}]\"'\n\n # par down the input dataframe to just the columns needed\n input_dataframe = input_dataframe[[id_column, geom_col]].copy()\n\n # rename the columns to follow the schema needed for routing\n input_dataframe.columns = ['ID', 'SHAPE']\n\n # ensure the spatial reference is WGS84 - if not, make it so\n if input_dataframe.spatial.sr.wkid != 4326:\n input_dataframe = input_dataframe.dm.project(4326)\n\n # if the geometry is not points, we still need points, so get the geometric centroids\n if input_dataframe.spatial.geometry_type != ['point']:\n input_dataframe['SHAPE'] = input_dataframe[geom_col].apply(\n lambda geom: Geometry({'x': geom.centroid[0], 'y': geom.centroid[1],\n 'spatialReference': geom.spatial_reference}))\n input_dataframe.spatial.set_geometry('SHAPE')\n\n # add a second column for the ID as Name\n input_dataframe['Name'] = input_dataframe['ID']\n\n # ensure the geometry is correctly being recognized\n input_dataframe.spatial.set_geometry('SHAPE')\n\n # set the order of the columns and return\n return input_dataframe[['ID', 'Name', 'SHAPE']]\n\n\ndef _get_max_near_dist_arcpy(origin_lyr):\n \"\"\"Get the maximum geodesic distance between stores.\"\"\"\n # create a location for temporary data\n temp_table = r'in_memory\\near_table_{}'.format(uuid.uuid4().hex)\n\n # if only one location, cannot generate a near table, and default to 120 miles\n if int(arcpy.management.GetCount(origin_lyr)[0]) <= 1:\n max_near_dist = 120 * 1609.34\n\n else:\n # use arcpy to get a table of all distances between stores\n near_tbl = arcpy.analysis.GenerateNearTable(\n in_features=origin_lyr,\n near_features=origin_lyr,\n out_table=temp_table,\n method=\"GEODESIC\"\n )[0]\n\n # get the maximum near distance, which will be in meters\n meters = max([row[0] for row in arcpy.da.SearchCursor(near_tbl, 'NEAR_DIST')])\n\n # remove the temporary table to ensure not stuff lying around and consuming RAM\n arcpy.management.Delete(temp_table)\n\n # get the maximum near distance (in meters)\n max_near_dist = meters * 0.00062137\n\n return max_near_dist\n\n\ndef _get_nearest_solve_local(origin_dataframe: pd.DataFrame, destination_dataframe: pd.DataFrame,\n destination_count: int, network_dataset: [Path, str],\n maximum_distance: [int, float] = None):\n \"\"\"\n Perform network solve using local resources with assumption of standard input.\n\n Args:\n origin_dataframe: Origin points Spatially Enabled Dataframe\n destination_dataframe: Destination points Spatially Enabled Dataframe\n destination_count: Destination points Spatially Enabled Dataframe\n network_dataset: Path to ArcGIS Network dataset for performing routing.\n maximum_distance: Maximum nearest routing distance in miles.\n\n Returns: Spatially Enabled Dataframe of solved closest facility routes.\n \"\"\"\n # make sure the path to the network dataset is a string\n network_dataset = str(network_dataset) if isinstance(network_dataset, Path) else network_dataset\n\n # get the mode of travel from the network dataset - rural so gravel roads are fair game\n nd_lyr = arcpy.nax.MakeNetworkDatasetLayer(network_dataset)[0]\n trvl_mode_dict = arcpy.nax.GetTravelModes(nd_lyr)\n trvl_mode = trvl_mode_dict['Rural Driving Time']\n\n # 
create the closest solver object instance\n # https://pro.arcgis.com/en/pro-app/arcpy/network-analyst/closestfacility.htm\n closest_solver = arcpy.nax.ClosestFacility(network_dataset)\n\n # set parameters for the closest solver\n closest_solver.travelMode = trvl_mode\n closest_solver.travelDirection = arcpy.nax.TravelDirection.ToFacility\n # TODO: How to set this to distance?\n closest_solver.timeUnits = arcpy.nax.TimeUnits.Minutes\n closest_solver.distanceUnits = arcpy.nax.DistanceUnits.Miles\n closest_solver.defaultTargetFacilityCount = destination_count\n closest_solver.routeShapeType = arcpy.nax.RouteShapeType.TrueShapeWithMeasures\n closest_solver.searchTolerance = 5000\n closest_solver.searchToleranceUnits = arcpy.nax.DistanceUnits.Meters\n\n # since maximum distance is optional, well, make it optional\n if maximum_distance is not None:\n closest_solver.defaultImpedanceCutoff = maximum_distance\n\n # load the origin and destination feature data frames into memory and load into the solver object instance\n # TODO: test if can use 'memory' workspace instead of scratch\n origin_fc = origin_dataframe.spatial.to_featureclass(os.path.join(arcpy.env.scratchGDB, 'origin_tmp'))\n closest_solver.load(arcpy.nax.ClosestFacilityInputDataType.Incidents, origin_fc)\n\n dest_fc = destination_dataframe.spatial.to_featureclass(os.path.join(arcpy.env.scratchGDB, 'dest_tmp'))\n closest_solver.load(arcpy.nax.ClosestFacilityInputDataType.Facilities, dest_fc)\n\n # run the solve, and get comfortable\n closest_result = closest_solver.solve()\n\n # export the results to a spatially enabled data frame, and do a little cleanup\n # TODO: test if can use 'memory/routes' instead - the more current method\n route_fc = 'in_memory/routes'\n closest_result.export(arcpy.nax.ClosestFacilityOutputDataType.Routes, route_fc)\n route_oid_col = arcpy.Describe(route_fc).OIDFieldName\n closest_df = GeoAccessor.from_featureclass(route_fc)\n arcpy.management.Delete(route_fc)\n if route_oid_col:\n closest_df.drop(columns=[route_oid_col], inplace=True)\n\n # get rid of the extra empty columns the local network solve adds\n closest_df.dropna(axis=1, how='all', inplace=True)\n\n # populate the origin and destination fields so the schema matches what online solve returns\n name_srs = closest_df.Name.str.split(' - ')\n closest_df['IncidentID'] = name_srs.apply(lambda val: val[0])\n closest_df['FacilityID'] = name_srs.apply(lambda val: val[1])\n\n return closest_df\n\n\ndef _reformat_closest_result_dataframe(closest_df: pd.DataFrame):\n \"\"\"\n Reformat the schema, dropping unneeded columns and renaming those kept to be more in line with this workflow.\n\n Args:\n closest_df: Dataframe of the raw output routes from the find closest analysis.\n\n Returns: Spatially Enabled Dataframe reformatted.\n \"\"\"\n # create a list of columns containing proximity metrics\n proximity_src_cols = [col for col in closest_df.columns if col.startswith('Total_')]\n\n # if both miles and kilometers, drop miles, and keep kilometers\n miles_lst = [col for col in proximity_src_cols if 'miles' in col.lower()]\n kilometers_lst = [col for col in proximity_src_cols if 'kilometers' in col.lower()]\n if len(miles_lst) and len(kilometers_lst):\n proximity_src_cols = [col for col in proximity_src_cols if col != miles_lst[0]]\n\n # calculate side of street columns\n closest_df['proximity_side_street_right'] = (closest_df['FacilityCurbApproach'] == 1).astype('int64')\n closest_df['proximity_side_street_left'] = (closest_df['FacilityCurbApproach'] == 
2).astype('int64')\n side_cols = ['proximity_side_street_left', 'proximity_side_street_right']\n\n # filter the dataframe to just the columns we need\n src_cols = ['IncidentID', 'FacilityRank', 'FacilityID'] + proximity_src_cols + side_cols + ['SHAPE']\n closest_df = closest_df[src_cols].copy()\n\n # replace total in proximity columns for naming convention\n closest_df.columns = [col.lower().replace('total', 'proximity') if col.startswith('Total_') else col\n for col in closest_df.columns]\n\n # rename the columns for the naming convention\n rename_dict = {'IncidentID': 'origin_id', 'FacilityRank': 'destination_rank', 'FacilityID': 'destination_id'}\n closest_df = closest_df.rename(columns=rename_dict)\n\n return closest_df\n\n\ndef _explode_closest_rank_dataframe(closest_df: pd.DataFrame, origin_id_col: str = 'origin_id',\n rank_col: str = 'destination_rank',\n dest_id_col: str = 'destination_id',\n dest_keep_cols: list = None):\n \"\"\"\n Effectively explode out or pivot the data so there is only a single record for each origin.\n\n Args:\n closest_df: Spatially Enabled Dataframe reformatted from the raw output of find nearest.\n origin_id_col: Column uniquely identifying each origin - default 'origin_id'\n rank_col: Column identifying the rank of each destination - default 'destination_rank'\n dest_id_col: Column uniquely identifying each destination - default 'destination_id'\n\n Returns: Dataframe with a single row for each origin with multiple destination metrics for each.\n \"\"\"\n # create a dataframe to start working with comprised of only the unique origin_dataframe to start with\n origin_dest_df = pd.DataFrame(closest_df[origin_id_col].unique(), columns=[origin_id_col])\n\n # get a list of the proximity columns\n proximity_cols = [col for col in closest_df.columns if col.startswith('proximity_')]\n\n # add any destination columns\n if dest_keep_cols:\n proximity_cols = proximity_cols + dest_keep_cols\n\n # iterate the closest destination ranking\n for rank_val in closest_df[rank_col].unique():\n\n # filter the dataframe to just the records with this destination ranking\n rank_df = closest_df[closest_df[rank_col] == rank_val]\n\n # create a temporary dataframe to begin building the columns onto\n df_temp = rank_df[origin_id_col].to_frame()\n\n # iterate the relevant columns\n for col in [dest_id_col] + proximity_cols:\n\n # create a new column name from the unique value and the original row name\n new_name = f'{col}_{rank_val:02d}'\n\n # filter the data in the column with the unique value\n df_temp[new_name] = rank_df[col].values\n\n # set the index to the origin id for joining\n df_temp.set_index(origin_id_col, inplace=True)\n\n # join the temporary dataframe to the master\n origin_dest_df = origin_dest_df.join(df_temp, on=origin_id_col)\n\n return origin_dest_df\n\n\ndef _get_nearest_local(origin_dataframe: pd.DataFrame, destination_dataframe: pd.DataFrame,\n network_dataset: [str, Path], origin_id_column: str = 'LOCNUM',\n destination_id_column: str = 'LOCNUM', destination_count: int = 4) -> pd.DataFrame:\n \"\"\"Local implementation of get nearest solution.\"\"\"\n # check to make sure network analyst is available using the env object to make it simplier\n env = utils.Environment()\n if 'Network' in env.arcpy_extensions:\n env.arcpy_checkout_extension('Network')\n else:\n raise Exception('To perform network routing locally you must have access to the ArcGIS Network Analyst '\n 'extension. 
It appears this extension is either not installed or not licensed.')\n\n # ensure the dataframes are in the right schema and have the right geometry (points)\n origin_net_df = _prep_sdf_for_nearest(origin_dataframe, origin_id_column)\n dest_net_df = _prep_sdf_for_nearest(destination_dataframe, destination_id_column)\n\n # run the closest analysis locally\n closest_df = _get_nearest_solve_local(origin_net_df, dest_net_df, destination_count, network_dataset)\n\n return closest_df\n\n\ndef _get_nearest_gis(origin_dataframe: pd.DataFrame, destination_dataframe: pd.DataFrame,\n source: [str, Country, GIS], origin_id_column: str = 'LOCNUM',\n destination_id_column: str = 'LOCNUM', destination_count: int = 4) -> pd.DataFrame:\n \"\"\"Web GIS implementation of get nearest solution.\"\"\"\n\n # TODO: backport these to be optional input parameters\n return_geometry = True\n output_spatial_reference = 4326\n\n # build the spatial reference dict\n out_sr = {'wkid': output_spatial_reference}\n\n # if a country instance, get the GIS object from it\n if isinstance(source, Country):\n assert isinstance(Country.source, GIS), 'The source Country must be reference an ArcGIS Web GIS object ' \\\n 'instance to solve using a GIS.'\n source = Country.source\n\n # run a couple of checks to make sure we do not encounter strange errors later\n assert isinstance(source, GIS), 'The source must be a GIS object instance.'\n assert utils.has_networkanalysis_gis(source.users.me), 'You must have the correct permissions in the Web GIS to ' \\\n 'perform routing solves. It appears you do not.'\n\n # prep the datasets for routing\n origin_fs = _prep_sdf_for_nearest(origin_dataframe, origin_id_column).spatial.to_featureset().to_dict()\n dest_fs = _prep_sdf_for_nearest(destination_dataframe, destination_id_column).spatial.to_featureset().to_dict()\n\n # create the url for doing routing\n route_url = source.properties.helperServices.route.url\n solve_url = '/'.join(route_url.split('/')[:-1]) + '/ClosestFacility/solveClosestFacility'\n\n # construct the payload for the routing solve\n params = {\n 'incidents': origin_fs,\n 'facilities': dest_fs,\n 'returnCFRoutes': True,\n 'f': 'json',\n 'defaultTargetFacilityCount': destination_count,\n 'outputLines': 'esriNAOutputLineTrueShape' if return_geometry else 'esriNAOutputLineNone',\n 'outSR': out_sr\n }\n\n # call the server for the solve\n res = source._con.post(solve_url, params)\n\n # unpack the results from the response\n route_df = FeatureSet.from_dict(res['routes']).sdf\n\n # clean up any empty columns\n notna_srs = route_df.isna().all()\n drop_cols = notna_srs[notna_srs].index.values\n route_df.drop(columns=drop_cols, inplace=True)\n\n # populate the origin and destination id columns so the output will be as expected\n id_srs = route_df['Name'].str.split(' - ')\n route_df['IncidentID'] = id_srs.apply(lambda val: val[0])\n route_df['FacilityID'] = id_srs.apply(lambda val: val[1])\n\n return route_df\n\n\ndef get_nearest(origin_dataframe: pd.DataFrame, destination_dataframe: pd.DataFrame,\n source: [str, Path, Country, GIS], single_row_per_origin: bool = True,\n origin_id_column: str = 'LOCNUM', destination_id_column: str = 'LOCNUM',\n destination_count: int = 4, near_prefix: str = None,\n destination_columns_to_keep: [str, list] = None) -> pd.DataFrame:\n \"\"\"\n Create a closest destination dataframe using origin and destination Spatially Enabled\n Dataframes, but keep each origin and destination still in a discrete row instead\n of collapsing to a single row per origin. 
The main reason to use this is if\n needing the geometry for visualization.\n\n Args:\n origin_dataframe: Origins for network solves.\n destination_dataframe: Destination points in one of the supported input formats.\n source: Either the path to the network dataset, the Country object associated with\n the Business Analyst source being used, or a GIS object instance.\n single_row_per_origin: Optional - Whether or not to pivot the results to return\n only one row for each origin location. Default is True.\n origin_id_column: Optional - Column in the origin points Spatially Enabled Dataframe\n uniquely identifying each feature. Default is 'LOCNUM'.\n destination_id_column: Column in the destination points Spatially Enabled Dataframe\n uniquely identifying each feature\n destination_count: Integer number of destinations to search for from every origin\n point.\n near_prefix: String prefix to prepend onto near column names in the output.\n destination_columns_to_keep: List of columns to keep in the output. Commonly, if\n businesses, this includes the column with the business names.\n\n Returns: Spatially Enabled Dataframe with a row for each origin id, and metrics for\n each nth destinations.\n \"\"\"\n\n for df in [origin_dataframe, destination_dataframe]:\n assert isinstance(df, pd.DataFrame), 'Origin and destination dataframes must both be pd.DataFrames'\n assert df.spatial.validate(), 'Origin and destination dataframes must be valid Spatially enabled DataFrames.' \\\n 'This can be checked using df.spatial.validate()'\n\n assert isinstance(source, (str, Path, Country, GIS)), 'source must be either a path to the network dataset, a ' \\\n 'dm.Country object instance, or a reference to a GIS.'\n\n assert isinstance(single_row_per_origin, bool)\n\n assert origin_id_column in origin_dataframe.columns, f'The provided origin_id_column does not appear to be in ' \\\n f'the origin_dataframe columns ' \\\n f'[{\", \".join(origin_dataframe.columns)}]'\n\n assert destination_id_column in destination_dataframe.columns, f'The provided destination_id_column does not ' \\\n f'appear to be in the destination_dataframe ' \\\n f'columns ' \\\n f'[{\", \".join(destination_dataframe.columns)}]'\n\n # if the source is a country set to local, we are using Business Analyst, so interrogate the source\n if isinstance(source, Country):\n\n # if local, get the path to the network dataset\n if source.source == 'local':\n source = get_ba_key_value('StreetsNetwork', source.geo_name)\n\n # if not local, set the source to the GIS object instance\n else:\n source = source.source\n\n # if the source is a path, convert it to a string because arcpy doesn't do well with path objects\n source = str(source) if isinstance(source, Path) else source\n\n # if a path, ensure it exists\n if isinstance(source, str):\n assert arcpy.Exists(source), f'The path to the network dataset provided does not appear to exist - ' \\\n f'\"{str(source)}\".'\n\n # include any columns to be retained in the output\n if destination_columns_to_keep is not None:\n\n # if just a single column is provided in a string, make it into a list\n if isinstance(destination_columns_to_keep, list):\n dest_cols = destination_columns_to_keep\n else:\n dest_cols = [destination_columns_to_keep]\n\n # make sure the destination columns include the id columns\n dest_cols = dest_cols if destination_id_column in dest_cols else [destination_id_column] + dest_cols\n\n # check all the columns to make sure they are in the output dataframe\n for col in dest_cols:\n assert col in 
destination_dataframe.columns, f'One of the destination_columns_to_keep {col}, does not ' \\\n f'appear to be in the destination_dataframe columns ' \\\n f'[{\", \".join(destination_dataframe.columns)}].'\n\n # if no columns, just populate an empty list so nested functions work\n else:\n dest_cols = []\n\n # now, the source is either a path to the network source or a GIS object instance, so call each as necessary\n if isinstance(source, str):\n raw_near_df = _get_nearest_local(origin_dataframe, destination_dataframe, source, origin_id_column,\n destination_id_column, destination_count)\n\n else:\n raw_near_df = _get_nearest_gis(origin_dataframe, destination_dataframe, source, origin_id_column,\n destination_id_column, destination_count)\n\n # reformat and standardize the output\n std_clstst_df = _reformat_closest_result_dataframe(raw_near_df)\n\n if dest_cols:\n if len(dest_cols):\n # add the columns onto the near dataframe for output\n dest_join_df = destination_dataframe[dest_cols].set_index(destination_id_column)\n std_clstst_df = std_clstst_df.join(dest_join_df, on='destination_id')\n\n # pivot and explode the results to be a single row for each origin if desired\n if single_row_per_origin:\n xplod_dest_cols = [c for c in dest_cols if c != destination_id_column]\n near_df = _explode_closest_rank_dataframe(std_clstst_df, dest_keep_cols=xplod_dest_cols)\n else:\n near_df = std_clstst_df\n\n # add prefixes to columns if provided\n if near_prefix is not None:\n near_df.columns = [f'{near_prefix}_{c}' for c in near_df.columns]\n near_oid_col = f'{near_prefix}_origin_id'\n else:\n near_oid_col = 'origin_id'\n\n # add results to input data\n if single_row_per_origin:\n out_df = origin_dataframe.join(near_df.set_index(near_oid_col), on=origin_id_column)\n\n else:\n out_df = near_df.join(origin_dataframe.drop(columns='SHAPE').set_index(origin_id_column), on=near_oid_col)\n out_df.columns = [c if not c.endswith('_SHAPE') else 'SHAPE' for c in out_df.columns]\n\n # shuffle the columns so the geometry is at the end\n if out_df.spatial.name is not None:\n out_df = out_df[[c for c in out_df.columns if c != out_df.spatial.name] + [out_df.spatial.name]]\n\n # recognize geometry\n out_df.spatial.set_geometry('SHAPE')\n\n return out_df\n\n\ndef get_travel_modes(source: Union[Path, GIS, Country], all_properties=False):\n \"\"\"\n Retrieve travel modes for the specified routing source.\n Args:\n source: Path to an ArcGIS Network Dataset if working in an environment with\n ArcGIS Pro providing access to ``arcpy``. If using a connection to a\n Web GIS thorough a GIS object instance, the GIS object instance.\n Finally, if using a Country object instance, the Country.\n all_properties: Get all available properties for all the travel modes.\n Default is False.\n\n Returns:\n Dataframe of available travel modes and descriptions for each.\n \"\"\"\n # if the source is a country, extract the source from the country\n if isinstance(source, Country):\n source = source.source\n\n # if a string, make sure simply set to local, and make sure arcpy is available\n if isinstance(source, str):\n source = source.lower()\n assert source == 'local', 'If intending to use local resources for analysis, you must use the \"local\" ' \\\n f'keyword. 
You provided, \"{source}\".'\n assert arcpy_avail, 'To use local resources for routing, you must be using an environment with arcpy ' \\\n 'available (ArcGIS Pro installed).'\n\n # otherwise, we SHOULD be dealing with a GIS\n elif isinstance(source, GIS):\n\n # get the url from the gis properties, retrieve the route properties, and format travel modes into a DataFrame\n prop = source._con.get(source.properties.helperServices.route.url)\n trvl_df = pd.DataFrame(prop['supportedTravelModes'])\n\n # populate the key for easy lookups\n trvl_df['key'] = trvl_df['name'].str.lower().str.replace(' ', '_')\n trvl_df.set_index('key', inplace=True, drop=True)\n\n # unless more is desired, keep it simple\n if not all_properties:\n trvl_df = trvl_df.loc[:, ['name', 'type', 'description']]\n\n else:\n raise Exception(f'The source must be either \"local\", a GIS object, or a Country object, not \"{type(source)}\".')\n\n return trvl_df\n\n","repo_name":"knu2xs/demographic-modeling","sub_path":"src/modeling/proximity.py","file_name":"proximity.py","file_ext":"py","file_size_in_byte":25843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27402405321","text":"from rest_framework import viewsets\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom posts.models import Comment, Group, Post, User\n\nfrom .permissions import IsAuthorOrReadOnly\nfrom .serializers import (CommentSerializer, GroupSerializer, PostSerializer,\n UserSerializer)\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass PostViewSet(viewsets.ModelViewSet):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n permission_classes = [IsAuthenticated, IsAuthorOrReadOnly]\n\n def perform_create(self, serializer):\n \"\"\"\n Method to call the serializer for\n creation and saving the post.\n \"\"\"\n serializer.save(author=self.request.user)\n\n\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n serializer_class = CommentSerializer\n permission_classes = [IsAuthenticated, IsAuthorOrReadOnly]\n\n def get_queryset(self):\n \"\"\"\n This view should return a list of all posts comments\n for the currently authenticated user.\n \"\"\"\n post = get_object_or_404(Post, id=self.kwargs.get(\"post_id\"))\n queryset = Comment.objects.filter(post=post)\n return queryset\n\n def perform_create(self, serializer):\n \"\"\"\n Method to call the serializer for\n creation and saving the comment to the post.\n \"\"\"\n post = get_object_or_404(Post, id=self.kwargs.get(\"post_id\"))\n serializer.save(author=self.request.user, post=post)\n","repo_name":"nastyatonkova/api_yatube","sub_path":"yatube_api/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70706719624","text":"\nimport pickle\nimport subprocess\n\nxor_inequalities_between_n_input_bits_file_path = \\\n \"./claasp/cipher_modules/models/milp/dictionary_containing_xor_inequalities_between_n_input_bits.obj\"\n\n\ndef generate_all_possible_points_with_n_bits(number_of_bits):\n all_possible_points = []\n tmp = []\n for integer in range(1 << number_of_bits):\n for index in range(number_of_bits):\n 
tmp.append((integer & (1 << index)) >> index)\n all_possible_points.append(tmp)\n tmp = []\n\n return all_possible_points\n\n\ndef generate_impossible_points_for_xor_between_n_input_bits(number_of_bits):\n all_possible_points = generate_all_possible_points_with_n_bits(number_of_bits + 1)\n impossible_points = []\n for point in all_possible_points:\n if sum(point) % 2 == 1:\n impossible_points.append(\"\".join([str(i) for i in point]))\n\n return impossible_points\n\n\ndef update_dictionary_that_contains_xor_inequalities_between_n_input_bits(number_of_input_bits):\n dictio = output_dictionary_that_contains_xor_inequalities()\n\n if number_of_input_bits not in dictio.keys():\n print(f\"Adding xor inequalities between {number_of_input_bits} input bits in pre-saved dictionary\")\n dictio[number_of_input_bits] = generate_impossible_points_for_xor_between_n_input_bits(number_of_input_bits)\n write_file = open(\n xor_inequalities_between_n_input_bits_file_path,\n 'wb')\n pickle.dump(dictio, write_file)\n write_file.close()\n\n\ndef update_dictionary_that_contains_xor_inequalities_for_specific_matrix(mat):\n number_of_1_in_each_cols = []\n for i in range(len(mat[0])):\n number_of_1 = 0\n col = [row[i] for row in mat]\n for bit in col:\n if bit:\n number_of_1 += 1\n if number_of_1 > 1:\n number_of_1_in_each_cols.append(number_of_1)\n number_of_1_in_each_cols = list(set(number_of_1_in_each_cols))\n for number_of_input_bits in number_of_1_in_each_cols:\n update_dictionary_that_contains_xor_inequalities_between_n_input_bits(number_of_input_bits)\n\n\ndef output_dictionary_that_contains_xor_inequalities():\n try:\n read_file = open(xor_inequalities_between_n_input_bits_file_path, 'rb')\n dictio = pickle.load(read_file)\n read_file.close()\n except (OSError, EOFError):\n dictio = {}\n return dictio\n\n\ndef delete_dictionary_that_contains_xor_inequalities():\n write_file = open(xor_inequalities_between_n_input_bits_file_path, 'wb')\n pickle.dump({}, write_file)\n write_file.close()","repo_name":"Crypto-TII/claasp","sub_path":"claasp/cipher_modules/models/milp/utils/generate_inequalities_for_xor_with_n_input_bits.py","file_name":"generate_inequalities_for_xor_with_n_input_bits.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"14614503098","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/9/20 下午3:30\n# @Author : DaiPuWei\n# @Email : 771830171@qq.com\n# @File : get_dr_json.py\n# @Software: PyCharm\n\n\"\"\"\"\n 这是利用YOLO v3模型对测试数据集进行检测,\n 生成测试数据集每张图片检测结果json文件的脚本\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport argparse\nimport colorsys\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom config.config import get_cfg\nfrom utils.model_utils import NpEncoder\nfrom utils.model_utils import get_anchors\nfrom utils.model_utils import get_classes\nfrom utils.dataset_utils import letterbox_image\n\nparser = argparse.ArgumentParser(description='get_gt_json parameters')\nparser.add_argument('--dataset_dir', type=str,help=\"voc dataset dir\")\nparser.add_argument('--config_file_path', type=str,help=\"yolo model yaml file path\")\nparser.add_argument('--dataset_name', type=str)\nparser.add_argument('--ext', type=str,help=\"Image's ext such as .jpg, .png\")\nparser.add_argument('--images_optional_flag', action='store_true', default=False)\nparser.add_argument('--h', 
type=int)\nparser.add_argument('--w', type=int)\nparser.add_argument(\"opts\",help=\"Modify config options using the command-line\",default=None,nargs=argparse.REMAINDER)\nargs = parser.parse_args()\n\nclass mAP_YOLO(object):\n\n def __init__(self,cfg,image_size):\n '''\n 计算mAP的YOLO模型类的初始化函数\n Args:\n cfg: 参数字典\n '''\n # 初始化相关参数\n self.cfg = cfg\n self.model_name = cfg.MODEL.MODEL_NAME\n self.image_size = image_size\n self.font_path = os.path.abspath(cfg.FONT_PATH)\n self.detection_result_dir = os.path.abspath(\"./mAP/input/detection-results/\")\n\n # 初始化目标框与模版框\n self.classes_names = get_classes(os.path.abspath(cfg.DATASET.CLASSES_PATH))\n self.anchors = get_anchors(os.path.abspath(cfg.DATASET.ANCHORS_PATH))\n self.num_anchors = len(self.anchors)\n self.num_classes = len(self.classes_names)\n\n self.generate()\n\n def generate(self):\n '''\n 这是生成YOLO v3检测计算图,并对检测结果进行解码的函数\n '''\n # 初始化不同YOLO模型\n if self.model_name == \"yolov3\": # yolov3\n from model.yolov3 import build_yolov3_eval\n self.yolo_eval_model = build_yolov3_eval(self.cfg)\n elif self.model_name == 'yolov3-spp': # yolov3-spp\n from model.yolov3 import build_yolov3_eval\n self.yolo_eval_model = build_yolov3_eval(self.cfg)\n elif self.model_name == 'yolov4': # yolov4\n from model.yolov4 import build_yolov4_eval\n self.yolo_eval_model = build_yolov4_eval(self.cfg)\n elif self.model_name == 'yolov4-csp': # yolov4-csp\n from model.yolov4_csp import build_yolov4_csp_eval\n self.yolo_eval_model = build_yolov4_csp_eval(self.cfg)\n elif self.model_name == 'yolov4-p5': # yolov4-p5\n from model.yolov4_p5 import build_yolov4_p5_eval\n self.yolo_eval_model = build_yolov4_p5_eval(self.cfg)\n elif self.model_name == 'yolov4-p6': # yolov4-p6\n from model.yolov4_p6 import build_yolov4_p6_eval\n self.yolo_eval_model = build_yolov4_p6_eval(self.cfg)\n elif self.model_name == 'yolov4-p7': # yolov4-p7\n from model.yolov4_p7 import build_yolov4_p7_eval\n self.yolo_eval_model = build_yolov4_p7_eval(self.cfg)\n elif self.model_name == 'yolov3-tiny': # yolov3-tiny\n from model.yolov3_tiny import build_yolov3_tiny_eval\n self.yolo_eval_model = build_yolov3_tiny_eval(self.cfg)\n elif self.model_name == 'yolov4-tiny': # yolov4-tiny\n from model.yolov4_tiny import build_yolov4_eval\n self.yolo_eval_model = build_yolov4_eval(self.cfg)\n else: # 默认为yolov3\n from model.yolov3 import build_yolov3_eval\n self.cfg.MODEL.USE_SPP = False\n self.yolo_eval_model = build_yolov3_eval(self.cfg)\n\n # 画框设置不同的颜色\n hsv_tuples = [(x / len(self.classes_names), 1., 1.)\n for x in range(len(self.classes_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n\n # 打乱颜色\n np.random.seed(10101)\n np.random.shuffle(self.colors)\n np.random.seed(None)\n\n def detect_image(self, image):\n '''\n 这是利用YOLO模型对图像检测的函数\n Args:\n image: 输入图像\n Returns:\n '''\n # 检测图像并保存图像检测结果\n # 检测图像并保存图像检测结果\n detection_results = []\n # 调整图片使其符合输入要求\n if self.cfg.DATASET.LETTERBOX_IMAGE:\n boxed_image = letterbox_image(image, (self.image_size[1], self.image_size[0]))\n else:\n boxed_image = image.convert('RGB')\n boxed_image = boxed_image.resize((self.image_size[1], self.image_size[0]), Image.BICUBIC)\n image_data = np.array(boxed_image, dtype='float32')\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n # 预测结果\n input_image_shape = np.expand_dims(np.array([image.size[1], image.size[0]], dtype='float32'), 0)\n outputs = 
self.yolo_eval_model.predict([image_data,input_image_shape])\n out_boxes, out_scores, out_classes = outputs\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n # ---------------------------------------------------------#\n # 设置字体\n # ---------------------------------------------------------#\n font = ImageFont.truetype(font=self.font_path,\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = max((image.size[0] + image.size[1]) // 300, 1)\n\n for i, c in enumerate(out_classes):\n predicted_class = self.classes_names[int(c)]\n score = str(out_scores[i])\n top, left, bottom, right = out_boxes[i]\n detection_results.append([predicted_class, int(c), float(score[:6]), int(left),\n int(top), int(right), int(bottom)])\n\n top = top - 5\n left = left - 5\n bottom = bottom + 5\n right = right + 5\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n\n # 画框框\n label = '{} {:.2f}'.format(predicted_class, float(score))\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n label = label.encode('utf-8')\n print(label, top, left, bottom, right)\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n for i in range(thickness):\n draw.rectangle([left + i, top + i, right - i, bottom - i],outline=self.colors[c])\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)],fill=self.colors[c])\n draw.text(text_origin, str(label, 'UTF-8'), fill=(0, 0, 0), font=font)\n del draw\n return image\n\ndef run_main():\n \"\"\"\n 这是主函数\n \"\"\"\n # 初始化YOLO模型训练阶段的超参数\n cfg = get_cfg()\n cfg.merge_from_file(os.path.abspath(args.config_file_path))\n cfg.merge_from_list(args.opts)\n\n # 初始化目标分类名称指点\n classes_path = os.path.abspath(cfg.DATASET.CLASSES_PATH)\n classes = get_classes(classes_path)\n cls2num_dict = dict(zip(classes, np.arange(len(classes))))\n\n # 初始化YOLO模型\n image_size = (args.h, args.w)\n yolo = mAP_YOLO(cfg, image_size)\n\n ext = args.ext\n images_optional_flag = args.images_optional_flag # 默认为False,不写入图片\n dataset_name = args.dataset_name\n input_dir = os.path.abspath(\"./input/{0}\".format(dataset_name))\n image_result_dir = os.path.join(input_dir, 'images')\n if images_optional_flag:\n if not os.path.exists(image_result_dir):\n os.makedirs(image_result_dir)\n\n # 初始化测试数据集txt文件\n dataset_dir = os.path.abspath(args.dataset_dir)\n dr_result_json_path = os.path.join(input_dir, 'dr_result.json')\n dr_result = {}\n image_array = []\n annotation_array = []\n img_cnt = 0\n anno_cnt = 0\n with open(dr_result_json_path, 'w+') as f:\n test_txt_path = os.path.join(dataset_dir,\"ImageSets\",\"Main\",\"val.txt\")\n image_ids = []\n with open(test_txt_path,\"r\") as g:\n for line in g.readlines():\n image_id = line.strip()\n image_ids.append(image_id)\n for image_id in image_ids:\n image_path = os.path.join(dataset_dir,\"JPEGImages\",image_id+ext)\n image = Image.open(image_path)\n detect_results,image = yolo.detect_image(image)\n if images_optional_flag:\n image.save(os.path.join(image_result_dir,))\n image_array.append({'file_name': image_id + \".jpg\", 'id': img_cnt, 'width': 960, 'height': 720})\n if len(detect_results) > 0:\n for _detect_result in detect_results:\n cls_name,cls_num,score,xmin,ymin,xmax,ymax = _detect_result\n w = xmax-xmin\n h = ymax-ymin\n 
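# COCO-style bbox format is [x_min, y_min, width, height]\n                    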
annotation_array.append({'image_id': img_cnt,\n 'iscrowd':0,\n 'bbox': [int(xmin),int(ymin),w,h],\n 'area':int(w*h),\n \"category_id\": cls_num,\n 'id': anno_cnt,\n 'score':score})\n anno_cnt += 1\n else:\n continue\n img_cnt += 1\n dr_result['images'] = image_array\n dr_result[\"annotations\"] = annotation_array\n dr_result[\"categories\"] = [{\"id\": id, \"name\": cls_name} for cls_name, id in cls2num_dict.items()]\n dr_result_json_data = json.dumps(dr_result,indent=4,separators=(',', ': '),cls=NpEncoder)\n print(dr_result_json_data)\n f.write(dr_result_json_data)\n print(\"Finished Detecting Test Dataset, Detection Result Conversion Completed!\")\n\nif __name__ == '__main__':\n run_main()","repo_name":"Daipuwei/YOLO-tf2","sub_path":"mAP_COCO/get_dr_json.py","file_name":"get_dr_json.py","file_ext":"py","file_size_in_byte":11035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32578185900","text":"import logging\nimport os\nimport sys\nimport types\n\nfrom copy import deepcopy\n\nfrom .config import config as teuth_config\nfrom .exceptions import ConnectionLostError\nfrom .job_status import set_status\nfrom .misc import get_http_log_path\nfrom .sentry import get_client as get_sentry_client\nfrom .timer import Timer\n\nlog = logging.getLogger(__name__)\n\n\ndef get_task(name):\n if '.' in name:\n module_name, task_name = name.split('.')\n else:\n module_name, task_name = (name, 'task')\n\n # First look for the tasks's module inside teuthology\n module = _import('teuthology.task', module_name, task_name)\n # If it is not found, try ceph-qa-suite (if it is in sys.path)\n if not module:\n module = _import('tasks', module_name, task_name)\n # If it is still not found, fail\n if not module:\n raise ImportError(\"Could not find task '{}'\".format(name))\n try:\n # Attempt to locate the task object inside the module\n task = getattr(module, task_name)\n # If we get another module, we need to go deeper\n if isinstance(task, types.ModuleType):\n task = getattr(task, task_name)\n except AttributeError:\n log.error(\"No subtask of '{}' named '{}' was found\".format(\n module_name,\n task_name,\n ))\n raise\n return task\n\n\ndef _import(from_package, module_name, task_name):\n full_module_name = '.'.join([from_package, module_name])\n try:\n module = __import__(\n full_module_name,\n globals(),\n locals(),\n [task_name],\n 0,\n )\n except ImportError:\n return None\n return module\n\n\ndef run_one_task(taskname, **kwargs):\n taskname = taskname.replace('-', '_')\n task = get_task(taskname)\n return task(**kwargs)\n\n\ndef run_tasks(tasks, ctx):\n archive_path = ctx.config.get('archive_path')\n if archive_path:\n timer = Timer(\n path=os.path.join(archive_path, 'timing.yaml'),\n sync=True,\n )\n else:\n timer = Timer()\n stack = []\n try:\n for taskdict in tasks:\n try:\n ((taskname, config),) = taskdict.iteritems()\n except (ValueError, AttributeError):\n raise RuntimeError('Invalid task definition: %s' % taskdict)\n log.info('Running task %s...', taskname)\n timer.mark('%s enter' % taskname)\n manager = run_one_task(taskname, ctx=ctx, config=config)\n if hasattr(manager, '__enter__'):\n stack.append((taskname, manager))\n manager.__enter__()\n except BaseException as e:\n if isinstance(e, ConnectionLostError):\n # Prevent connection issues being flagged as failures\n set_status(ctx.summary, 'dead')\n else:\n # the status may have been set to dead, leave it as-is if so\n if not ctx.summary.get('status', '') == 'dead':\n 
set_status(ctx.summary, 'fail')\n if 'failure_reason' not in ctx.summary:\n ctx.summary['failure_reason'] = str(e)\n log.exception('Saw exception from tasks.')\n\n sentry = get_sentry_client()\n if sentry:\n config = deepcopy(ctx.config)\n\n tags = {\n 'task': taskname,\n 'owner': ctx.owner,\n }\n if 'teuthology_branch' in config:\n tags['teuthology_branch'] = config['teuthology_branch']\n if 'branch' in config:\n tags['branch'] = config['branch']\n\n # Remove ssh keys from reported config\n if 'targets' in config:\n targets = config['targets']\n for host in targets.keys():\n targets[host] = ''\n\n job_id = ctx.config.get('job_id')\n archive_path = ctx.config.get('archive_path')\n extra = dict(config=config,\n )\n if job_id:\n extra['logs'] = get_http_log_path(archive_path, job_id)\n\n exc_id = sentry.get_ident(sentry.captureException(\n tags=tags,\n extra=extra,\n ))\n event_url = \"{server}/?q={id}\".format(\n server=teuth_config.sentry_server.strip('/'), id=exc_id)\n log.exception(\" Sentry event: %s\" % event_url)\n ctx.summary['sentry_event'] = event_url\n\n if ctx.config.get('interactive-on-error'):\n ctx.config['interactive-on-error'] = False\n from .task import interactive\n log.warning('Saw failure during task execution, going into interactive mode...')\n interactive.task(ctx=ctx, config=None)\n # Throughout teuthology, (x,) = y has been used to assign values\n # from yaml files where only one entry of type y is correct. This\n # causes failures with 'too many values to unpack.' We want to\n # fail as before, but with easier to understand error indicators.\n if type(e) == ValueError:\n if e.message == 'too many values to unpack':\n emsg = 'Possible configuration error in yaml file'\n log.error(emsg)\n ctx.summary['failure_info'] = emsg\n finally:\n try:\n exc_info = sys.exc_info()\n while stack:\n taskname, manager = stack.pop()\n log.debug('Unwinding manager %s', taskname)\n timer.mark('%s exit' % taskname)\n try:\n suppress = manager.__exit__(*exc_info)\n except Exception as e:\n if isinstance(e, ConnectionLostError):\n # Prevent connection issues being flagged as failures\n set_status(ctx.summary, 'dead')\n else:\n set_status(ctx.summary, 'fail')\n if 'failure_reason' not in ctx.summary:\n ctx.summary['failure_reason'] = str(e)\n log.exception('Manager failed: %s', taskname)\n\n if exc_info == (None, None, None):\n # if first failure is in an __exit__, we don't\n # have exc_info set yet\n exc_info = sys.exc_info()\n\n if ctx.config.get('interactive-on-error'):\n from .task import interactive\n log.warning(\n 'Saw failure during task cleanup, going into interactive mode...')\n interactive.task(ctx=ctx, config=None)\n else:\n if suppress:\n sys.exc_clear()\n exc_info = (None, None, None)\n\n if exc_info != (None, None, None):\n log.debug('Exception was not quenched, exiting: %s: %s',\n exc_info[0].__name__, exc_info[1])\n raise SystemExit(1)\n finally:\n # be careful about cyclic references\n del exc_info\n timer.mark(\"tasks complete\")\n","repo_name":"zglnsyyj/teuthology","sub_path":"teuthology/run_tasks.py","file_name":"run_tasks.py","file_ext":"py","file_size_in_byte":7110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13066303264","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTest module for the AddRowToDataFrame operation\n\n\"\"\"\n\n__author__ = 'Samir Adrik'\n__email__ = 'samir.adrik@gmail.com'\n\nfrom pandas import DataFrame\n\nimport pytest as pt\n\nfrom source.app import AddRowToDataFrame, Operation\nfrom 
source.util import TrackingError\n\n\nclass TestAddRowToDataFrame:\n \"\"\"\n Test cases for the AddRowToDataFrame operation\n\n \"\"\"\n\n @classmethod\n def setup(cls):\n \"\"\"\n Executed before all tests\n\n \"\"\"\n cls.row = {'prisantydning': '3 325 000 kr'}\n cls.dataframe = {\"historikk\": DataFrame({'Tinglyst': {0: '23.01.2019', 1: '23.10.2017'},\n 'Boligtype': {0: 'Blokkleilighet',\n 1: 'Blokkleilighet'},\n 'Seksjonsnummer': {0: '4', 1: '4'},\n 'Pris': {0: '3\\xa0490\\xa0000 kr',\n 1: '2\\xa0570\\xa0000 kr'}})}\n cls.desc = \"Add List Price to Ownership History\"\n cls.add_row_to_dataframe = AddRowToDataFrame(cls.row, cls.dataframe, cls.desc)\n\n def test_add_row_to_dataframe_is_instance_of_operation(self):\n \"\"\"\n Test that AddRowToDataFrame is an instance and subclass of Operation\n\n \"\"\"\n for parent in [AddRowToDataFrame, Operation]:\n assert isinstance(self.add_row_to_dataframe, parent)\n assert issubclass(self.add_row_to_dataframe.__class__, parent)\n\n @pt.mark.parametrize('invalid_row', ['test', 90210])\n @pt.mark.parametrize('invalid_dataframe', [True, 90210.0, ('test', 'test')])\n @pt.mark.parametrize('invalid_desc', [['test'], {'test': 'test'}])\n def test_invalid_args_raises_tracking_error(self, invalid_row, invalid_dataframe, invalid_desc):\n \"\"\"\n Test that the AddRowToDataFrame object raises TrackingError if the row, dataframe or desc\n arguments are invalid\n\n \"\"\"\n with pt.raises(TrackingError):\n AddRowToDataFrame(invalid_row, self.dataframe, self.desc)\n with pt.raises(TrackingError):\n AddRowToDataFrame(self.row, invalid_dataframe, self.desc)\n with pt.raises(TrackingError):\n AddRowToDataFrame(self.row, self.dataframe, invalid_desc)\n\n def test_arguments_gets_set_in_object(self):\n \"\"\"\n Test that arguments get set in the AddRowToDataFrame object\n\n \"\"\"\n assert self.add_row_to_dataframe.row == self.row\n assert self.add_row_to_dataframe.dataframe == self.dataframe\n assert self.add_row_to_dataframe.desc == \"id: \" + self.desc\n\n def test_add_row_to_dataframe_run_method(self):\n \"\"\"\n Test the run method in the AddRowToDataFrame operation\n\n \"\"\"\n add_row_to_dataframe = AddRowToDataFrame(self.row, self.dataframe, self.desc)\n assert add_row_to_dataframe.run() == self.add_row_to_dataframe.run()\n assert add_row_to_dataframe.run() == {\n 'Tinglyst': {0: 'Prisantydning', 1: '23.01.2019', 2: '23.10.2017'},\n 'Boligtype': {0: '-', 1: 'Blokkleilighet', 2: 'Blokkleilighet'},\n 'Seksjonsnummer': {0: '-', 1: '4', 2: '4'},\n 'Pris': {0: '3 325 000 kr', 1: '3\\xa0490\\xa0000 kr', 2: '2\\xa0570\\xa0000 kr'}}\n\n def test_add_row_to_dataframe_run_method_without_dataframe(self):\n \"\"\"\n Test the run method in the AddRowToDataFrame operation without a passed DataFrame\n\n \"\"\"\n add_row_to_dataframe = AddRowToDataFrame(self.row, None, self.desc)\n assert add_row_to_dataframe.run() == {'Tinglyst': {0: 'Prisantydning'},\n 'Boligtype': {0: '-'},\n 'Bolig identifikasjon': {0: '-'},\n 'Pris': {0: '3 325 000 kr'}}\n","repo_name":"seemir/stressa","sub_path":"tests/app/processing/engine/test_add_row_to_dataframe.py","file_name":"test_add_row_to_dataframe.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71023567304","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@file: data_tools.py\n@time: 2020/4/7 5:57 p.m.\n@author: shenpinggang\n@contact: 1285456152@qq.com\n@desc: Data preprocessing utilities, mainly responsible for preprocessing training data, test data and single inputs. Depending on the scenario: word segmentation, part-of-speech tagging, named entity recognition\n# 1. Preprocess.processing_text preprocesses an input sentence or text.\n# 2. 
Preprocess.read_file_data reads ordinary file data.\n# 3. Preprocess.read_json_data reads json file data.\n\"\"\"\n\nimport os\nimport re\nfrom tqdm import tqdm\n\ntry:\n from module.core.exception import exception_handling, \\\n FileNotFoundException\nexcept ModuleNotFoundError:\n from .exception import exception_handling, \\\n FileNotFoundException\n\n\nclass DataTools(object):\n \"\"\"\n Collection of data processing tools\n \"\"\"\n\n def __init__(self):\n pass\n\n class Preprocess(object):\n \"\"\"\n Word segmentation preprocessing class\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def processing_text(text, del_start_str=None, del_end_str=None, handle_func=None):\n \"\"\"\n Preprocess text; used to handle an input sentence. Can strip marker characters from the start and end of the sentence and normalize it.\n :param text: (str, mandatory) the text\n :param del_start_str: (list or str, optional, default=None) start marker characters to strip from the text\n :param del_end_str: (list or str, optional, default=None) end marker characters to strip from the text\n :param handle_func: (function, optional, default=None) callback applied to every line of data that is read.\n :return: (str) the preprocessed text\n \"\"\"\n text = text.strip('\\n').strip()\n if del_start_str is not None:\n if isinstance(del_start_str, (list, tuple)):\n for start_str in del_start_str:\n if text[:len(start_str)] in del_start_str:\n text = text[len(start_str):]\n text = text.strip()\n\n elif isinstance(del_start_str, str):\n if text[:len(del_start_str)] == del_start_str:\n text = text[len(del_start_str):]\n text = text.strip()\n\n if del_end_str is not None:\n if isinstance(del_end_str, (list, tuple)):\n for end_str in del_end_str:\n if text[-len(end_str):] in del_end_str:\n text = text[:-len(end_str)]\n text = text.strip()\n\n elif isinstance(del_end_str, str):\n if text[-len(del_end_str):] == del_end_str:\n text = text[:-len(del_end_str)]\n text = text.strip()\n\n if handle_func is not None:\n text = handle_func(text)\n\n return text\n\n @staticmethod\n @exception_handling\n def read_file_data(file, del_start_str=None, del_end_str=None, handle_func=None):\n \"\"\"\n Read the data in a file. Mainly used for reading training and test data. The dataset is read line by line.\n :param file: (str, mandatory) data file\n :param del_start_str: (str, optional, default=None) start marker characters to remove\n :param del_end_str: (str, optional, default=None) end marker characters to remove\n :param handle_func: (function, optional, default=None) callback applied to every line of data that is read.\n :return: (list) list of preprocessed data\n \"\"\"\n if not os.path.isfile(file):\n raise FileNotFoundException(file)\n\n data = list()\n with open(file, \"rb\") as lines:\n for line in tqdm(lines):\n line = str(line, encoding=\"utf8\")\n\n line = DataTools.Preprocess.processing_text(line, del_start_str, del_end_str, handle_func)\n\n data.append(line)\n\n return data\n\n\ndef test_read_file_data(file, del_start_str=None, del_end_str=None, handle_func=None):\n \"\"\"Test function\"\"\"\n data = DataTools.Preprocess.read_file_data(file, del_start_str, del_end_str, handle_func)\n with open(file, \"rb\") as lines:\n for source_data, target_data in zip(lines, data):\n source_data = str(source_data, encoding=\"utf8\").strip('\\n')\n print(\"source data:\", source_data)\n print(\"target data:\", target_data)\n print()\n\n\nif __name__ == \"__main__\":\n def regular(sent):\n \"\"\"Test regularization function\"\"\"\n sent = re.sub('[,]{1,100}', ',', sent)\n return sent\n\n\n def handle(line):\n \"\"\"Test data handling function\"\"\"\n import json\n\n json_data = json.loads(line)\n text = json_data['text']\n label = json_data['label']\n\n identifier_b, identifier_i, identifier_o, identifier_e, identifier_s = \"B\", \"I\", \"O\", \"E\", \"S\"\n identifier_format = lambda i, s: \"{}_{}\".format(i, s)\n identifier = [identifier_o] * len(text)\n\n for ner_name, ner_value in label.items():\n for ner_str, ner_index in 
ner_value.items():\n for n_index in ner_index:\n if text[n_index[0]:n_index[1] + 1] != ner_str:\n print(\"Data Error: no specific character found . text: {}, label: {}\".format(text, label))\n exit()\n if len(ner_str) == 1:\n identifier[n_index[0]] = identifier_format(identifier_o, ner_name)\n elif len(ner_str) == 2:\n identifier[n_index[0]] = identifier_format(identifier_b, ner_name)\n identifier[n_index[1]] = identifier_format(identifier_e, ner_name)\n elif len(ner_str) > 2:\n identifier[n_index[0]] = identifier_format(identifier_b, ner_name)\n for i in range(1, len(ner_str) - 2 + 1):\n identifier[n_index[0] + i] = identifier_format(identifier_i, ner_name)\n identifier[n_index[1]] = identifier_format(identifier_e, ner_name)\n\n return [text, identifier]\n\n\n # train_file = \"../../data/msr_training_debug.utf8\"\n # test_read_file_data(train_file, del_start_str=[\"“\", \"’\"], handle_func=regular)\n dev_file = \"../../data/cluener_public/dev.json\"\n test_read_file_data(dev_file, handle_func=handle)\n","repo_name":"boyshen/BLTP","sub_path":"module/core/data_tools.py","file_name":"data_tools.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70178581706","text":"from django.contrib.auth.models import User\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import serializers\nfrom constants.http_messages import *\nfrom constants.auth_user import AuthUser\nfrom constants.permission_checker_helper import PermissionChecker\nclass DeleteUserView(APIView):\n def validate_id(self, id):\n try:\n User.objects.get(id=id)\n except User.DoesNotExist:\n raise serializers.ValidationError(\"User does not exist.\")\n\n def delete_user(self, id):\n user = User.objects.get(id=id)\n user.delete()\n\n def post(self, request, *args, **kwargs):\n errors = {}\n data = {}\n status = None\n message = None\n\n token = AuthUser.get_token(request)\n\n if type(token) == dict:\n return Response(token)\n\n payload = AuthUser.get_user(token)\n\n if 'errors' in payload:\n return Response(payload)\n\n errors = PermissionChecker.validate_permission_control(self, payload)\n\n if len(errors) != 0:\n status = bad_request\n message = 'You are not permitted to access User Control'\n return Response({\"status\": status , \"message\": message , \"data\": data , \"errors\": errors})\n\n id = request.query_params['id']\n if not id:\n errors[\"id\"] = [\"This field is required.\"]\n status = bad_request\n else:\n try:\n self.validate_id(id)\n except serializers.ValidationError as e:\n errors = e.detail\n status = bad_request\n else:\n self.delete_user(id)\n message = \"User deleted successfully.\"\n status = ok\n\n if errors:\n status = status or bad_request\n return Response({\"status\": status, \"message\": message, \"data\": data, \"errors\": errors})\n elif message:\n return Response({\"status\": status, \"message\": message, \"data\": data, \"errors\": errors})\n else:\n return Response({\"status\": status, \"message\": message, \"data\": data, \"errors\": errors})","repo_name":"Ronuel-R/Digital_Dexterity_Backend","sub_path":"digital_dex_admin_web/versions/v1p0/features/profile_page/delete_user/views/delete_user_view.py","file_name":"delete_user_view.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13516450473","text":"'''Adding Activation function to our custom 
dense layer (OOP), by Saber\r\n'''\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.layers import Layer #for creating our custom dense layer\r\n\r\n#_______________________________________________________________________________\r\n\r\nclass SimpleDense(Layer): # Parent class: Layer, Child class: SimpleDense\r\n\r\n # add an activation parameter\r\n def __init__(self, units=32, activation=None): #the default of activation is None\r\n super(SimpleDense, self).__init__() #cuz inheritance\r\n self.units = units #attribute1\r\n \r\n # define the activation to get from the built-in activation layers in Keras\r\n self.activation = tf.keras.activations.get(activation) #attribute2\r\n\r\n\r\n def build(self, input_shape):\r\n w_init = tf.random_normal_initializer()\r\n self.w = tf.Variable(name=\"kernel\",\r\n initial_value=w_init(shape=(input_shape[-1], self.units),\r\n dtype='float32'),\r\n trainable=True) #w/kernel is trainable (we can do gradient/derivative)\r\n b_init = tf.zeros_initializer() #set the initial value of bias to 0\r\n self.b = tf.Variable(name=\"bias\",\r\n initial_value=b_init(shape=(self.units,), dtype='float32'),\r\n trainable=True) #bias is trainable \r\n super().build(input_shape)\r\n\r\n\r\n def call(self, inputs):\r\n \r\n # pass the computation to the activation layer\r\n return self.activation(tf.matmul(inputs, self.w) + self.b)\r\n#________________________________________________________________________________\r\n \r\nmnist = tf.keras.datasets.mnist\r\n\r\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\r\nx_train, x_test = x_train / 255.0, x_test / 255.0\r\n\r\nmodel = tf.keras.models.Sequential([\r\n tf.keras.layers.Flatten(input_shape=(28, 28)),\r\n SimpleDense(128, activation='relu'), #adding activation function to our layer\r\n tf.keras.layers.Dropout(0.2), #to challenge the learning (dropping some neurons in calculation)\r\n tf.keras.layers.Dense(10, activation='softmax') #softmax► a generalized logistic reg for classification (units=10 classes)\r\n])\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\nmodel.fit(x_train, y_train, epochs=5)\r\nmodel.evaluate(x_test, y_test) \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"Saber0275/Siamese-ANN-Functional-API--by-Saber-with-Spyder","sub_path":"Adding Activation function to our custom dense layer (OOP), by Saber.py","file_name":"Adding Activation function to our custom dense layer (OOP), by Saber.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11465129100","text":"import logging\n\nfrom typing import Any, Optional\nfrom azure.core.exceptions import ServiceRequestError\nfrom click import argument, command\nfrom Babylon.utils.typing import QueryType\nfrom Babylon.utils.decorators import inject_context_with_resource, wrapcontext\nfrom Babylon.utils.decorators import timing_decorator\nfrom Babylon.utils.response import CommandResponse\nfrom Babylon.utils.environment import Environment\nfrom Babylon.utils.clients import get_registry_client\n\nlogger = logging.getLogger(\"Babylon\")\nenv = Environment()\n\n\n@command()\n@wrapcontext()\n@timing_decorator\n@argument(\"server\", type=QueryType(), required=False)\n@inject_context_with_resource({'acr': ['login_server']})\ndef list(context: Any, server: Optional[str] = None) -> CommandResponse:\n \"\"\"\n List all docker images in 
the specified registry\n \"\"\"\n acr_login_server = server or context['acr_login_server']\n cr_client = get_registry_client(acr_login_server)\n logger.info(f\"Getting repositories stored in registry {acr_login_server}\")\n try:\n repos = [repo for repo in cr_client.list_repository_names()]\n except ServiceRequestError:\n logger.error(f\"Could not list from registry {acr_login_server}\")\n return CommandResponse.fail()\n _ret: list[str] = [f\"Repositories from {acr_login_server}:\"]\n for repo in repos:\n props = cr_client.list_tag_properties(repository=repo)\n tags = [p.name for p in props]\n tags.sort(reverse=True)\n _ret.append(f\" • {repo}: {tags}\")\n logger.info(\"\\n\".join(_ret))\n # return the success response so callers can inspect it\n return CommandResponse.success()\n","repo_name":"Cosmo-Tech/Babylon","sub_path":"Babylon/commands/azure/acr/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"7887273084","text":"from sys import stdin, stdout\nfrom collections import deque\nimport sys\n\n\ndef union(u_a, i, j):\n ri = ufind(u_a, i)\n rj = ufind(u_a, j)\n u_a[ri] = rj\n\n\ndef ufind(u_a, i):\n if u_a[i] != i:\n u_a[i] = ufind(u_a, u_a[i])\n return u_a[i]\n\n\ntry:\n n, m, t = map(int, stdin.readline().split())\n s_a = []\n for _ in range(n):\n s_a.append(stdin.readline().strip())\n\n u_a = []\n o_a = []\n for i in range(n):\n for j in range(m):\n u_a.append(i*m + j)\n o_a.append(0)\n\n for i in range(n):\n for j in range(m):\n if i > 0 and s_a[i][j] == s_a[i-1][j]:\n union(u_a, (i-1)*m + j, i*m + j)\n if j > 0 and s_a[i][j] == s_a[i][j-1]:\n union(u_a, i*m + j-1, i*m + j)\n\n dic = {}\n q = deque()\n for i in range(len(u_a)):\n r = ufind(u_a, i)\n if r not in dic:\n dic[r] = [0, i] # 0: no adjacent, 1: has adjacent\n else:\n q.append(i)\n if dic[r][0] == 0:\n q.append(dic[r][1])\n dic[r][0] = 1\n\n step = 0\n da = [[1,0], [-1,0], [0,1], [0,-1]]\n unchange = False\n if len(q) == 0:\n unchange = True\n\n # 01011\n # 10110\n # 01101\n # 11010\n # 10101\n while len(q) > 0:\n l = len(q)\n for _ in range(l):\n cv = q.popleft()\n ci = cv // m\n cj = cv % m\n\n o_a[cv] = step\n for d in da:\n ni = ci + d[0]\n nj = cj + d[1]\n nv = ni * m + nj\n\n if 0 <= ni < n and 0 <= nj < m and dic[ufind(u_a, nv)][0] == 0:\n q.append(nv)\n dic[ufind(u_a, nv)][0] = 1\n\n step += 1\n\n for _ in range(t):\n ri, rj, p = map(int, stdin.readline().split())\n rv = (ri-1)*m + (rj-1)\n color = int(s_a[ri-1][rj-1])\n if unchange:\n res = color\n else:\n change = max(p - o_a[rv], 0)\n res = color ^ (change % 2)\n stdout.write(str(res) + '\\n')\nexcept:\n # print [\"Unexpected error:\", sys.exc_info()[0][1, 8].replace(' ', '')]\n print(sys.exc_info()[1])\n print(sys.exc_info()[0])\n print(sys.exc_info()[2])\n","repo_name":"tycyd/codeforces","sub_path":"bfs/1349C Orac and Game of Life.py","file_name":"1349C Orac and Game of Life.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39505906993","text":"prime=[True]*246913\nprimeCnt=[0]*246913\nprime[0],prime[1],primeCnt[2],i=False,False,1,2\n\nwhile i*i<=246912:\n if prime[i]==False:\n i+=1\n continue\n j=i*2\n while j<=246912:\n prime[j]=False\n j+=i\n i+=1\n\nfor i in range(3,246913): primeCnt[i]=primeCnt[i-1]+prime[i]\n\nwhile 1:\n n=int(input())\n if n==0: break\n 
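The prime-counting record around this point answers each query with a single subtraction: it sieves primality once up to 246912 and keeps a running count, so the number of primes in (n, 2n] is primeCnt[n*2] - primeCnt[n]. The same idea, rewritten as a self-contained sketch:

# Sieve of Eratosthenes plus a prefix count; each query is then O(1).
LIMIT = 246912
is_prime = [True] * (LIMIT + 1)
is_prime[0] = is_prime[1] = False
for i in range(2, int(LIMIT ** 0.5) + 1):
    if is_prime[i]:
        for j in range(i * i, LIMIT + 1, i):
            is_prime[j] = False

prime_count = [0] * (LIMIT + 1)
for i in range(2, LIMIT + 1):
    prime_count[i] = prime_count[i - 1] + is_prime[i]

n = 10
print(prime_count[2 * n] - prime_count[n])  # -> 4 (11, 13, 17, 19)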
print(primeCnt[n*2]-primeCnt[n])","repo_name":"njw1204/BOJ-AC","sub_path":"problem/01000~09999/04948/4948.py3.py","file_name":"4948.py3.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25026729214","text":"def solution(n, t, m, timetable):\n # normalize the input times to minutes\n timetable = [int(time[:2])*60+int(time[3:]) for time in timetable]\n timetable.sort()\n \n start = 540\n \n for _ in range(n):\n for _ in range(m):\n # if there is a queue, arrive one minute before the last waiting rider\n if timetable and timetable[0] <=start:\n candidate = timetable.pop(0)-1\n else : # if nobody is waiting, arrive exactly at departure time\n candidate = start\n \n start +=t\n \n h,m = divmod(candidate,60)\n \n return str(h).zfill(2)+':'+str(m).zfill(2)","repo_name":"parkchanghyup/algorithm","sub_path":"python/프로그래머스/[python] 프로그래머스 - 셔틀 버스.py","file_name":"[python] 프로그래머스 - 셔틀 버스.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24867510897","text":"import logging\r\n\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\nfrom . import image_loader\r\n\r\nclass button_sprites:\r\n arrow_left:pygame.Surface = None\r\n arrow_right:pygame.Surface = None\r\n\r\n main_customise:pygame.Surface = None\r\n main_join_game:pygame.Surface = None\r\n main_customise_A:pygame.Surface = None\r\n main_join_game_A:pygame.Surface = None\r\n\r\n text_customise:pygame.Surface = None\r\n\r\n button_left:pygame.Surface = None\r\n button_right:pygame.Surface = None\r\n button_left_A:pygame.Surface = None\r\n button_right_A:pygame.Surface = None\r\n\r\n pointer:pygame.Surface = None\r\n\r\n colour_r:pygame.Surface = None\r\n colour_g:pygame.Surface = None\r\n colour_y:pygame.Surface = None\r\n colour_b:pygame.Surface = None\r\n\r\n podium:pygame.Surface = None\r\n\r\n text_characters = []\r\n text_characters_2x = []\r\n\r\nchars = 'abcdefghijklmnopqrstuvwxyz0123456789 +'\r\n\r\ndef draw_text(surface, top_left:tuple, text:str, size:str='1x', center_x:bool=False):\r\n xp = top_left[0]\r\n if center_x:\r\n xp -= get_text_width(text, size)/2\r\n from_l = button_sprites.text_characters\r\n if size == '2x':\r\n from_l = button_sprites.text_characters_2x\r\n for char in text.lower():\r\n if char in chars:\r\n text_char = from_l[chars.index(char)]\r\n surface.blit(text_char[1], (xp, top_left[1]))\r\n xp += text_char[0]+1\r\n\r\ndef get_text_width(text:str, size:str='1x'):\r\n from_l = button_sprites.text_characters\r\n if size == '2x':\r\n from_l = button_sprites.text_characters_2x\r\n text_width = 0\r\n for char in text.lower():\r\n if char in chars:\r\n text_char = from_l[chars.index(char)]\r\n text_width += text_char[0]+1\r\n text_width -= 1\r\n return text_width\r\n\r\n\r\ndef load_button_sprites():\r\n global button_sprites\r\n logging.debug('loading button sprites')\r\n sheet = image_loader.load_image('./resources/buttons.png')\r\n\r\n button_sprites.arrow_left = pygame.transform.scale(sheet.subsurface((1,1,28,43)), (56,86))\r\n button_sprites.arrow_right = pygame.transform.scale(sheet.subsurface((30,1,28,43)), (56,86))\r\n \r\n button_sprites.main_customise = pygame.transform.scale(sheet.subsurface((1,81,115,21)), (230,42))\r\n button_sprites.main_join_game = pygame.transform.scale(sheet.subsurface((1,103,115,21)),(230,42))\r\n button_sprites.main_customise_A = pygame.transform.scale(sheet.subsurface((117,81,115,21)), (230,42))\r\n button_sprites.main_join_game_A = 
pygame.transform.scale(sheet.subsurface((117,103,115,21)),(230,42))\r\n\r\n button_sprites.text_customise = pygame.transform.scale(sheet.subsurface((235,83,111,17)), (222,34))\r\n \r\n button_sprites.button_left = pygame.transform.scale(sheet.subsurface((105,1,24,19)), (48,38))\r\n button_sprites.button_right = pygame.transform.scale(sheet.subsurface((130,1,24,19)),(48,38))\r\n button_sprites.button_left_A = pygame.transform.scale(sheet.subsurface((105,21,24,19)), (48,38))\r\n button_sprites.button_right_A = pygame.transform.scale(sheet.subsurface((130,21,24,19)),(48,38))\r\n\r\n button_sprites.pointer = pygame.transform.scale(sheet.subsurface((59,18,15,21)),(60,84))\r\n \r\n button_sprites.colour_r = pygame.transform.scale(sheet.subsurface((164,59,16,21)),(64,84))\r\n button_sprites.colour_g = pygame.transform.scale(sheet.subsurface((181,59,16,21)),(64,84))\r\n button_sprites.colour_y = pygame.transform.scale(sheet.subsurface((198,59,16,21)),(64,84))\r\n button_sprites.colour_b = pygame.transform.scale(sheet.subsurface((215,59,16,21)),(64,84))\r\n \r\n button_sprites.podium = pygame.transform.scale(sheet.subsurface((1,125,34,31)),(136,124))\r\n\r\n # load text characters\r\n\r\n positions = []\r\n\r\n # letter bboxes\r\n\r\n # a, b, c, d, e, f, g, h\r\n for i in range(8):\r\n bbox = (1+12*i,45,11,17)\r\n positions.append(bbox)\r\n \r\n # i\r\n positions.append((97,45,9,17),)\r\n\r\n # j, k, l\r\n for i in range(3):\r\n bbox = (107+12*i,45,11,17)\r\n positions.append(bbox)\r\n\r\n # m\r\n positions.append((143,45,17,17),)\r\n\r\n # n, o, p\r\n for i in range(3):\r\n bbox = (1+12*i,63,11,17)\r\n positions.append(bbox)\r\n \r\n # q\r\n positions.append((37,63,12,17),)\r\n\r\n # r, s, t, u, v\r\n for i in range(5):\r\n bbox = (50+12*i,63,11,17)\r\n positions.append(bbox)\r\n\r\n # w\r\n positions.append((110,63,17,17),)\r\n\r\n # x, y, z\r\n for i in range(3):\r\n bbox = (128+12*i,63,11,17)\r\n positions.append(bbox)\r\n \r\n # number bboxes\r\n\r\n # 0\r\n positions.append((155,1,11,17),)\r\n # 1\r\n positions.append((167,1,5,17),)\r\n # 2, 3, 4, 5, 6, 7, 8, 9, space\r\n for i in range(9):\r\n positions.append((173+12*i,1,11,17),)\r\n \r\n # symbols\r\n\r\n # +\r\n positions.append((155,19,11,17),)\r\n\r\n\r\n for bbox in positions:\r\n letter_surf = sheet.subsurface(bbox)\r\n letter_size = letter_surf.get_size()\r\n sx, sy = letter_size\r\n button_sprites.text_characters.append((sx, letter_surf),)\r\n button_sprites.text_characters_2x.append((sx*2, pygame.transform.scale(letter_surf, (sx*2,sy*2))),)","repo_name":"Machine-builder/python-uno","sub_path":"scripts/button_sprites.py","file_name":"button_sprites.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22264379064","text":"import math\nimport itertools\nfrom collections import defaultdict\n\n\ndef cluster(matches, cluster_size=3, cluster_dist=20):\n clusters = set()\n votes = ght(matches, cluster_dist)\n for source, bins in votes.items():\n for bin, cluster in bins.items():\n if len(cluster) >= cluster_size:\n clusters.add(frozenset(cluster))\n clusters = [list(c) for c in clusters]\n total_clusters = [c for bins in votes.values() for c in bins.values()]\n return clusters, total_clusters\n\n\ndef ght(matches, cluster_dist=20):\n votes = defaultdict(lambda: defaultdict(set))\n try:\n dim = max(m.neighbors[0].kp.scale for m in matches)\n except:\n dim = 2\n for match in matches:\n ds = round_to(match.query.scale / 
match.neighbors[0].kp.scale, 2)\n d_theta = round_to(match.query.orientation - match.neighbors[0].kp.orientation, 0.4)\n dx = round_to(match.query.x - match.neighbors[0].kp.x, 1.5*dim)\n dy = round_to(match.query.y - match.neighbors[0].kp.y, 1.5*dim)\n bins = itertools.product(*(dx, dy))\n for bin in bins:\n train_kps = [tuple(m.neighbors[0].kp.kp[:2]) for m in votes[match.neighbors[0].kp.source][bin]]\n x = [m.neighbors[0].kp.x for m in votes[match.neighbors[0].kp.source][bin]]\n try:\n min_x = min(x)\n max_x = max(x)\n except:\n min_x = max_x = match.neighbors[0].kp.x\n if tuple(match.neighbors[0].kp.kp[:2]) not in train_kps:\n if min_x - cluster_dist < match.neighbors[0].kp.x < max_x + cluster_dist:\n votes[match.neighbors[0].kp.source][bin].add(match)\n return votes\n\n\ndef round_to(x, base=1, n=2):\n lo = base * math.floor(float(x)/base)\n hi = base * math.ceil(float(x)/base)\n return (lo, hi)\n","repo_name":"Curly-Mo/sample-recognition","sub_path":"hough.py","file_name":"hough.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16167768354","text":"\"\"\"create db\n\nRevision ID: 9fed58a95781\nRevises: \nCreate Date: 2023-06-17 19:38:15.122755\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '9fed58a95781'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('config',\n sa.Column('name_config', sa.UUID(), nullable=False),\n sa.Column('hash_yaml', sa.String(), nullable=True),\n sa.Column('compile_test', sa.Boolean(), nullable=True),\n sa.Column('name_esphome', sa.String(), nullable=True),\n sa.Column('platform', sa.String(), nullable=True),\n sa.Column('config_json', postgresql.JSONB(astext_type=sa.Text()), nullable=True),\n sa.PrimaryKeyConstraint('name_config')\n )\n op.create_table('users',\n sa.Column('user_id', sa.String(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('surname', sa.String(), nullable=False),\n sa.Column('email', sa.String(), nullable=False),\n sa.Column('is_active', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('user_id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('user_id'),\n sa.UniqueConstraint('user_id')\n )\n op.create_table('user_config',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.String(), nullable=True),\n sa.Column('name_config', sa.UUID(), nullable=True),\n sa.Column('name_esphome', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['name_config'], ['config.name_config'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.user_id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('user_config')\n op.drop_table('users')\n op.drop_table('config')\n # ### end Alembic commands ###\n","repo_name":"apbodrov/esphome-yaml-constructor","sub_path":"migrations/versions/9fed58a95781_create_db.py","file_name":"9fed58a95781_create_db.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"20294978782","text":"import torch\nimport random\nimport torch.nn as nn\nimport numpy as np\nimport os\nfrom config import plot_interv\nfrom .utils import prototypical_loss\n\ndef seed_everything(seed=1029):\n '''\n :param seed:\n :param device:\n :return:\n '''\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\nclass ProtoNet():\n def __init__(self, model, all_tasks, in_dim, inner_lr, meta_lr, n_shot=1, n_query=16, N_tasks=3, seed=0):\n # important objects\n self.model = model\n self.weights = list(model.parameters()) # the maml weights we will be meta-optimising\n self.criterion = nn.CrossEntropyLoss()\n self.seed = seed\n seed_everything(self.seed)\n self.all_tasks = all_tasks\n self.in_dim = in_dim\n # hyperparameters\n self.inner_lr = inner_lr\n self.meta_lr = meta_lr\n self.n_shot = n_shot\n self.n_query = n_query\n self.N_tasks = N_tasks\n # metrics\n self.plot_every = plot_interv\n\n def main_loop(self, num_iterations):\n seed_everything(self.seed)\n\n for iteration in range(1, num_iterations + 1):\n\n if iteration == np.floor(0.5 * num_iterations):\n self.meta_lr /= 10\n\n # sample a batch of tasks\n sampled_id = np.random.choice(self.N_tasks, 1)[0]\n\n # zero out the gradient of previous iteration\n self.model.zero_grad()\n\n\n # compute meta loss\n task_i = self.all_tasks[sampled_id]\n X, y = task_i.sample_data(size=(self.n_shot + self.n_query))\n y = y.to(torch.int64)\n\n embeddings = self.model.forward(X)\n meta_loss, _ = prototypical_loss(input=embeddings, target=y, n_support=self.n_shot)\n\n # compute meta gradient of loss with respect to maml weights\n meta_grads = torch.autograd.grad(meta_loss, self.weights)\n\n # assign meta gradient to weights and take optimization step\n for w, g in zip(self.weights, meta_grads):\n w.grad = g\n\n for param in self.model.parameters():\n param.data -= self.meta_lr * param.grad.data\n\n # log metrics\n if iteration % self.plot_every == 0:\n print(\"{}/{}\".format(iteration, num_iterations))\n\n","repo_name":"bokun-wang/moml","sub_path":"one-shot/src/protonet.py","file_name":"protonet.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30399781037","text":"import math\n\n##############################################################################\n# Prettyprint duration of time\ndef duration(time):\n\n def norm2(zahl):\n return((\"00\"+str(zahl))[-2:])\n\n gesamtzeit=time\n dauer=''\n # Days\n days = time // (24 * 3600)\n if days == 1:\n dauer=dauer+str(days)+' day '\n if days > 1:\n dauer=dauer+str(days)+' days '\n # Hours\n time = time % (24 * 3600)\n hours = time // 3600\n if hours == 1:\n dauer=dauer+str(hours)+' hour '\n if hours > 1:\n dauer=dauer+str(hours)+' hours '\n # Minutes\n time %= 3600\n minutes = time // 60\n if minutes >= 1:\n dauer=dauer+str(minutes)+' min '\n # Seconds\n time %= 60\n seconds = time\n dauer=dauer+str(seconds)+' sec'\n \n ##### Test printout\n # print(\"d:h:m:s-> %d:%d:%d:%d\" % (days, hours, minutes, seconds))\n\n return 
dauer\n\n\n##############################################################################\n# Prettyprint KB, MB, GB, or TB string\ndef humanbytes(B):\n '''Return the given bytes as a human friendly KB, MB, GB, or TB string'''\n B = float(B)\n KB = float(1024)\n MB = float(KB ** 2) # 1,048,576\n GB = float(KB ** 3) # 1,073,741,824\n TB = float(KB ** 4) # 1,099,511,627,776\n #\n if B < KB:\n return '{0} {1}'.format(B,'Bytes' if B != 1 else 'Byte')\n elif KB <= B < MB:\n return '{0:.3f} KB'.format(B/KB)\n elif MB <= B < GB:\n return '{0:.3f} MB'.format(B/MB)\n elif GB <= B < TB:\n return '{0:.3f} GB'.format(B/GB)\n elif TB <= B:\n return '{0:.3f} TB'.format(B/TB)\n\n##############################################################################\n# Calculating the Perceived Brightness of a Color\n# https://www.nbdtech.com/Blog/archive/2008/04/27/Calculating-the-Perceived-Brightness-of-a-Color.aspx\ndef rgb_brightness(color):\n R = int(color[1:3], 16)\n G = int(color[3:5], 16)\n B = int(color[5:7], 16)\n return math.sqrt(0.241*R*R + 0.691*G*G + 0.068*B*B )\n# Better formula at:\n# https://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color\n\n\n##############################################################################\n##############################################################################\n##############################################################################\n\nif __name__ == '__main__':\n\n ##### Test humanbytes\n # tests = [1, 1024, 500000, 1048576, 50000000, 1073741824, 5000000000, 1099511627776, 5000000000000]\n # for t in tests:\n # print('{0} == {1}'.format(t,humanbytes(t)))\n\n ##### Test rgb_brightness\n print(rgb_brightness(\"#121314\"))\n\n ##### Test Prettyprint duration of time\n time = int(2*60*60+7)\n print(duration(time))\n","repo_name":"jennerwein/whoami","sub_path":"app/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32879401514","text":"import networkx as nx, matplotlib.pyplot as plt\nfrom networkx.algorithms import tree\n\n# part 1\n# CREATE THE GRAPH AND ITS EDGES \nG = nx.Graph()\ntupleList = [(0, 2, 49), (0,4,43), (1, 3, 31), (1, 2, 56)]\nfor tuple in tupleList: G.add_edge(tuple[0], tuple[1], weight=tuple[2])\n\n\n# CREATE THE MINIMUM SPANNING TREE, EDGE LIST IN INCREASING ORDER\nedgelist = list(tree.minimum_spanning_edges(G, algorithm='kruskal', data=True))\n\n# CREATE A COMPLETE GRAPH WITH EDGE WEIGHTS SET TO NONE\nG_complete=nx.complete_graph(5)\n\nNewGraph = nx.Graph()\n\n# ASSIGN THE MST EDGE WEIGHTS TO THE COMPLETE GRAPH\nfor i in edgelist:\n a=int(i[0])\n b=int(i[1])\n G_complete[a][b]['weight']=i[2].get(\"weight\")\n\n# EDGE LIST IN DECREASING ORDER\nprint(\"List of Edges: \", edgelist)\n\nnodi=G.nodes\nlista_nodi = [];\nfor nodo in nodi:\n lista_nodi.append(nodo)\nprint(\"List of Nodes:\", lista_nodi)\n\n\nlista_nuvole = [];\nfor nodo in lista_nodi: \n nuvola = []\n nuvola.append(nodo)\n lista_nuvole.append(list(nuvola))\nprint(\"List of Clouds: \" , lista_nuvole)\n\n\n\nfor i in edgelist:\n node1=int(i[0])\n node2=int(i[1])\n nuvola1 = nuvola2 = []\n for nuv in lista_nuvole:\n if node1 in nuv : nuvola1 = nuv \n if node2 in nuv : nuvola2 = nuv\n peso= i[2].get(\"weight\")\n for n1 in nuvola1:\n for n2 in nuvola2:\n if(not(n1 == node1 and n2 == node2)) :\n peso1=G_complete.get_edge_data(n1,n2).get(\"weight\")\n if(not peso1):\n NewGraph.add_edge(n1, n2, weight=peso+1)\n 
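The surrounding script tracks connected components as explicit "cloud" (nuvola) lists and merges them edge by edge; the textbook disjoint-set (union-find) structure does the same bookkeeping with near-constant-time merges. A compact sketch on the example's five nodes and its edges (illustrative only, not part of the original script):

parent = list(range(5))

def find(i):
    while parent[i] != i:
        parent[i] = parent[parent[i]]  # path halving keeps the trees flat
        i = parent[i]
    return i

def union(i, j):
    ri, rj = find(i), find(j)
    if ri != rj:
        parent[ri] = rj

for a, b in [(0, 2), (0, 4), (1, 3), (1, 2)]:
    union(a, b)
print([find(i) for i in range(5)])  # one shared representative: one component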
G_complete[n1][n2]['weight']=peso+1\n else: NewGraph.add_edge(n1, n2, weight=peso)\n \n nuvola1.extend(nuvola2) # extend mutates in place and returns None, so no reassignment\n lista_nuvole.remove(nuvola2)\n\n\n\n# part 2\n# WALK THE WHOLE EDGE LIST OF THE GRAPH, ADDING EACH WEIGHT TO THE COUNTER SUMM\nsumm=0\nfor i in G_complete.edges: summ += G_complete.get_edge_data(i[0],i[1]).get(\"weight\")\nprint(\"The sum is: \" + str(summ))\n\n# CORRECTNESS CHECK OF THE MST BUILT FROM THE GRAPH GENERATED IN PART 1\nedgelist = list(tree.minimum_spanning_edges(G_complete, algorithm='kruskal', data=True))\nM = nx.Graph()\nM.add_edges_from(edgelist)\nnx.draw(M, with_labels=True, font_weight='bold')\nplt.title('Figure 1', y=-0.01)\nplt.show()\n \nnx.draw(G_complete, with_labels=True, font_weight='bold')\nplt.title('Figure 2', y=-0.01)\nplt.show()\n","repo_name":"LucaTomei/Algorithm_Design_HW","sub_path":"Homework_1/E2/E2.py","file_name":"E2.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28558973517","text":"# -*- coding: utf-8 -*-\n\"\"\"\nContains a bunch of information about this package.\n\"\"\"\n\n__version__ = \"0.1.7\"\n\n__author__ = \"Benjamin F. Maier\"\n__copyright__ = \"Copyright 2020-2021, \" + __author__\n__credits__ = [__author__]\n__license__ = \"MIT\"\n__maintainer__ = __author__\n__email__ = \"contact@benmaier.org\"\n__status__ = \"Development\"\n","repo_name":"benmaier/epipack","sub_path":"epipack/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"81"} +{"seq_id":"1987105180","text":"import seaborn\nimport pandas as pd\nimport numpy as np\nfrom typing import List\nimport time\nfrom experiments.problems import all_problems, BaseProblem\nfrom core.constraints import CombinedConstraint\nfrom core.sample_generation.hmc import get_hmc_samples_for_constraint\nfrom experiments.common.setup_experiment import setup_experiment, flush_logs\nimport dataclasses\n\n\n\ndef main():\n @dataclasses.dataclass\n class Options:\n problems: List[str]\n count: int = 1000\n plot: bool = False\n\n args = setup_experiment(\"sample_generation_hmc\", Options) \n\n log_dir = args.log_dir\n params = args.params\n problems = all_problems.keys() if \"All\" in params.problems else params.problems\n for k in problems:\n if k not in all_problems:\n raise ValueError(f\"Invalid problem name: {k}\")\n\n print(\"Running data generation for:\", \", \".join(problems))\n for k in problems:\n print(\"Running:\", k)\n problem:BaseProblem = all_problems[k]\n start_time = time.time()\n if problem.state_action_bound_constraint is not None:\n constraint = CombinedConstraint(problem.constraint.var_count, problem.constraint.dim - problem.constraint.var_count, [problem.constraint, problem.state_action_bound_constraint])\n else:\n constraint = problem.constraint\n s = get_hmc_samples_for_constraint(constraint, params.count, 0) \n print(f\"Sample generation time for {k}: {(time.time() - start_time):.2f} seconds\")\n if params.plot:\n df = pd.DataFrame({f\"Dim {i}\": s[:, i] for i in range(s.shape[1])})\n seaborn.pairplot(df, plot_kws={\"s\": 1}).savefig(f\"{log_dir}/{k}.png\")\n np.save(f\"{log_dir}/{k}.npy\", s)\n flush_logs()\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"rlr-smu/flow-pg","sub_path":"experiments/generate_samples_with_hmc.py","file_name":"generate_samples_with_hmc.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13480227020","text":"#!/usr/bin/env python3\n#-*- coding: UTF-8 -*-\n\nfrom base import rus_abc, rus_cabc\n\nrev_en = {rus_cabc[k]:rus_cabc[32-k] for k in range(33)}\nrev_de = {rus_cabc[32-k]:rus_cabc[k] for k in range(33)}\n\ndef encrypt(text):\n newtext = \"\"\n for x in text:\n l = x.islower()\n cur = x.upper()\n if cur in rus_abc:\n el = rev_en[cur]\n newtext += el.lower() if l else el\n else:\n newtext += x\n return newtext\n\ndecrypt = lambda t: encrypt(t)\n\nif __name__ == \"__main__\":\n text = input(\"Text: \")\n print(encrypt(text))","repo_name":"OlegWock/python-ciphers","sub_path":"atbash.py","file_name":"atbash.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42461342743","text":"from contextlib import contextmanager\nfrom time import perf_counter, sleep\n\n@contextmanager\ndef timer():\n stats = dict()\n start = perf_counter()\n stats['start'] = start\n try:\n yield stats\n finally:\n end = perf_counter()\n stats['end'] = end\n stats['elapsed'] = end - start\n\nwith timer() as stats:\n sleep(3)\n\n# with timer() as stats:\n# sleep(2)\n\nprint(stats)\nprint('*********************')\nimport sys\n\n@contextmanager\ndef out_to_file(fname):\n current_stdout = sys.stdout\n print('current_stdout', current_stdout)\n file = open(fname, 'w')\n sys.stdout = file\n try:\n yield None\n finally:\n file.close()\n sys.stdout = current_stdout\n print('current stdout in finally',current_stdout)\n print('sys.stdout', sys.stdout)\n\nwith out_to_file('test.txt'):\n print('line 1')\n print('line 2')\n\n# print('hello')\n\n# with open('test.txt', 'r') as f:\n# print(f.readlines())\n\n# from contextlib import redirect_stdout\n\n# with open ('test2.txt', 'w') as f:\n# with redirect_stdout(f):\n# print('Look on the bright side of life')\n\n# with open('test2.txt', 'r') as f:\n# print(f.readlines())","repo_name":"Khachatur86/FredBaptisteUdemy","sub_path":"context_managers/contextmanagersusinggeneratorfunctions2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19424572880","text":"####################################################################\r\n# Pyint by Burak\r\n# 3.09.2019\r\n#\r\n# pyint.py file (MAIN PROGRAM)\r\n# Description: A 64x64 pixel painter program made in python pygame\r\n####################################################################\r\n\r\ntry:\r\n import pygame as pg\r\n from tkinter import *\r\n from tkinter import messagebox\r\n from tkinter.filedialog import askopenfilename, asksaveasfilename\r\nexcept:\r\n import install_requirements\r\n import pygame as pg\r\n from tkinter import *\r\n from tkinter import messagebox\r\n from tkinter.filedialog import askopenfilename, asksaveasfilename\r\nimport sys\r\n\r\n\r\nsys.setrecursionlimit(10000)\r\npg.init()\r\n\r\nsw, sh = 960, 850\r\nsc = (sw//2, sh//2)\r\nscreen = pg.display.set_mode((sw, sh))\r\npg.display.set_caption(\"Pyint by Burak\")\r\npg.display.set_icon(pg.image.load(\"icon.png\"))\r\n\r\nfillImage = pg.transform.scale(pg.image.load(\"imgs/fill.png\"), (40,40))\r\nbrushImage = 
pg.transform.scale(pg.image.load(\"imgs/brush.png\"), (25,25))\r\neraserImage = pg.transform.scale(pg.image.load(\"imgs/eraser.png\"), (25,25))\r\ndropperImage = pg.transform.scale(pg.image.load(\"imgs/eyedropper.png\"), (30,30))\r\n\r\ndef Remap(oldlow, oldhigh, newlow, newhigh, value):\r\n oldRange = (oldhigh - oldlow)\r\n newRange = (newhigh - newlow)\r\n newVal = (((value - oldlow) * newRange) / oldRange) + newlow\r\n return newVal\r\n\r\n\r\ndef draw_walls():\r\n wall_color = (50,50,50)\r\n wall_thickness = 4\r\n\r\n pg.draw.rect(screen, (150,150,150), (g1.xCount * g1.cellSize, 0, sw - g1.xCount * g1.cellSize, g1.yCount*g1.cellSize))\r\n pg.draw.rect(screen, (80,80,80), (0, g1.xCount * g1.cellSize, sw, sh-g1.yCount*g1.cellSize))\r\n\r\n pg.draw.rect(screen, wall_color, (g1.xCount * g1.cellSize, 0, wall_thickness, g1.yCount*g1.cellSize))\r\n pg.draw.rect(screen, wall_color, (0, g1.yCount*g1.cellSize-wall_thickness, sw, wall_thickness))\r\n\r\n pg.draw.rect(screen, wall_color, (0, 0, sw, wall_thickness))\r\n pg.draw.rect(screen, wall_color, (sw-wall_thickness, 0, wall_thickness, sh))\r\n pg.draw.rect(screen, wall_color, (0, 0, wall_thickness, sh))\r\n pg.draw.rect(screen, wall_color, (0, sh - wall_thickness, sw, wall_thickness))\r\n\r\n\r\nclass Cell(object):\r\n\r\n def __init__(self, size, color=[0, 0, 0]):\r\n self.size = size\r\n self.color = color\r\n self.subsurface = pg.Surface((self.size,self.size))\r\n self.subsurface.fill(self.color)\r\n self.pos = (0, 0)\r\n\r\n def change_color(self, color):\r\n self.color = color\r\n self.subsurface.fill(self.color)\r\n\r\n def Draw(self, win, x, y):\r\n self.pos = (x, y)\r\n win.blit(self.subsurface, self.pos)\r\n\r\n\r\nclass Grid(object):\r\n def __init__(self, xc, yc, csize, x, y, color=[255, 255, 255]):\r\n self.xCount = xc\r\n self.yCount = yc\r\n self.cellSize = csize\r\n self.pos = (x, y)\r\n self.color = color\r\n self.grid = []\r\n self.undoList = [[], []]\r\n\r\n for i in range(self.xCount):\r\n self.grid.append([])\r\n self.undoList[0].append([])\r\n self.undoList[1].append([])\r\n for j in range(self.yCount):\r\n self.grid[i].append(Cell(self.cellSize, self.color))\r\n self.undoList[0][i].append(self.color)\r\n self.undoList[1][i].append(self.color)\r\n\r\n def Draw(self, win):\r\n for i in range(self.xCount):\r\n for j in range(self.yCount):\r\n self.grid[i][j].Draw(win, self.pos[0]+(self.cellSize*i), self.pos[1]+(self.cellSize*j))\r\n\r\n def change_color(self, posx, posy, color):\r\n self.grid[posy][posx].change_color(color)\r\n\r\n def clean(self):\r\n for i in range(self.xCount):\r\n for j in range(self.yCount):\r\n self.grid[i][j].change_color(self.color)\r\n\r\n\r\n\r\nclass Button(object):\r\n active = False\r\n clicked = False\r\n rollOver = False\r\n\r\n def __init__(self, posX, posY, width, height, color, text=\"Button\", type=1, fontSize=25, fontColor=(0, 0, 0)):\r\n self.pos = [posX, posY]\r\n self.drawPos = self.pos.copy()\r\n self.width, self.height = width, height\r\n self.color = color\r\n self.text, self.fontSize, self.fontColor = text, fontSize, fontColor\r\n self.type = type\r\n self.subsurface = pg.Surface((self.width, self.height))\r\n self.subsurface.fill(self.color)\r\n self.font = pg.font.SysFont(None, self.fontSize)\r\n self.mes = self.font.render(self.text, True, self.fontColor)\r\n self.slideVal = 0\r\n\r\n def Draw(self, win, val=-1):\r\n if self.type == 1:\r\n if self.rollOver and not self.clicked:\r\n self.subsurface.set_alpha(100)\r\n else:\r\n self.subsurface.set_alpha(150)\r\n \r\n if 
self.clicked:\r\n self.subsurface.set_alpha(255)\r\n \r\n win.blit(self.subsurface, self.pos)\r\n self.subsurface.blit(self.mes, (15, self.height/3))\r\n elif self.type == 2:\r\n self.slideVal = Remap(-60,60,1,5,(self.pos[0]- self.drawPos[0]))\r\n pg.draw.rect(screen, (190,190,190), (self.drawPos[0]-100, self.drawPos[1]-30, 168, 60))\r\n pg.draw.rect(screen, (140,140,140), (self.drawPos[0]-60, self.drawPos[1]+self.height/3, 120, self.height/2))\r\n pg.draw.rect(screen, (220,220,220), (self.drawPos[0]-90, self.drawPos[1]+1, 20, 20))\r\n self.valMes = self.font.render(str(val), True, (30,30,30))\r\n win.blit(self.valMes, (self.drawPos[0]-85, self.drawPos[1]+3))\r\n win.blit(self.subsurface, (self.pos[0]-self.width/2, self.pos[1]))\r\n win.blit(self.mes, (self.drawPos[0]-90, self.drawPos[1]-25))\r\n\r\nlogo = pg.transform.scale(pg.image.load(\"logo.png\"), (280,70))\r\n\r\ncolors1 = [[0, 0, 0], [24,24,24], [48,48,48], [64,64,64], [128,128,128],[155,155,155],[200,200,200],[255,255,255],\r\n [27,38,49],[40,55,71],[46,64,83],[52,73,94],[93,109,126],[133,146,158],[174,182,191],[214,219,223],\r\n [77,86,86],[95,106,106],[113,125,126],[149,165,166],[170,183,184],[191,201,202],[213,219,219],[229,232,232],\r\n [98,101,103],[121,125,127],[144,148,151],[189,195,199],[202,207,210],[229,231,233],[248,249,249],[255,255,255],\r\n [100,30,22],[123,36,28],[146,43,33],[192,57,43],[205,97,85],[217,136,128],[230,176,170],[242,215,213],\r\n [120,40,31],[148,49,38],[176,58,46],[220,76,60],[236,112,99],[241,148,138],[245,183,177],[250,219,216],\r\n [74,35,90],[91,44,111],[108,52,131],[142,68,173],[165,105,189],[187,143,206],[210,180,222],[232,218,239],\r\n [21,67,96],[26,82,118],[31,97,141],[41,128,185],[84,153,199],[127,179,213],[169,204,227],[212,230,241],\r\n [20,90,50],[25,111,61],[34,141,84],[34,174,96],[82,190,128],[125,206,160],[169,223,191],[212,239,223],\r\n [125,102,8],[154,125,10],[183,149,11],[230,196,15],[244,208,63],[247,220,111],[249,231,159],[252,243,207],\r\n [126,81,9],[156,100,12],[185,119,14],[242,156,18],[245,176,65],[248,196,113],[250,215,160],[253,235,208],\r\n [110,44,0],[135,54,0],[160,64,0],[211,84,0],[220,118,51],[229,152,102],[237,187,153],[246,221,204]\r\n ]\r\n\r\ncolors2 = [[241,157,154],[241,179,164],[246,209,190],[252,225,213],[242,193,173],[241,175,153], #Skins\r\n [128,232,221],[124,194,246],[175,129,228],[231,132,186],[249,193,160],[183,246,175], # Soft Hues\r\n [100,93,62],[130,123,92],[156,151,115],[86,113,80],[46,71,43],[16,42,10], # Forest\r\n [252,120,150],[193,107,188],[152,89,197],[108,66,196],[85,56,193],[30,171,215], # Sunset\r\n [92,58,42],[121,84,63],[172,138,104],[200,173,139],[223,213,191],[206,159,85]] # Coffee\r\n\r\ncolorCells1 = []\r\ncolorCells2 = []\r\nfor color in colors1:\r\n colorCells1.append(Cell(20, color))\r\n\r\nfor color in colors2:\r\n colorCells2.append(Cell(25, color))\r\n\r\ncolorTitleFont = pg.font.SysFont(None, 25)\r\ncolorTitle = colorTitleFont.render(\"Color Palette\", True, (50,50,50))\r\n\r\ncolorScheme = 1\r\ncolorFont = pg.font.SysFont(None, 22)\r\ncolorTexts = [colorFont.render(\"Skin\", True, (50,50,50)), colorFont.render(\"Soft Hues\", True, (50,50,50)),\r\n colorFont.render(\"Forest\", True, (50, 50, 50)), colorFont.render(\"Sunset\", True, (50,50,50)),\r\n colorFont.render(\"Coffee\", True, (50, 50, 50))]\r\n\r\n\r\ng1 = Grid(64, 64, 12, 0, 0, [255, 255, 255])\r\nsave_b = Button(20,790,80,40, (100, 100, 100), \"Save\", 1, 24, (255,255,255))\r\nload_b = Button(110,790,80,40, (100, 100, 100), \"Load\", 1, 24, (255,255,255))\r\nexport_b 
= Button(200,790,80,40, (100, 100, 100), \"Export\", 1, 24, (255,255,255))\r\nSL_Buttons = [save_b, load_b, export_b]\r\n\r\nS_brushSize = Button(880, 305, 10,20, (240,240,240), \"Brush Size\", 2)\r\nS_eraserSize = Button(880, 225, 10,20, (240,240,240), \"Eraser Size\", 2)\r\nS_buttons = [S_brushSize, S_eraserSize]\r\n\r\nB_penTool = Button(825, 60, 30, 30, (80,80,80), \"\", 1)\r\nB_eraserTool = Button(875, 60, 30, 30, (80,80,80), \"\", 1)\r\nB_fillTool = Button(825, 110, 30, 30, (80,80,80), \"\", 1)\r\nB_eyeDropper = Button(875, 110, 30, 30, (80,80,80), \"\", 1)\r\n\r\nB_Buttons = [B_penTool, B_eraserTool, B_fillTool, B_eyeDropper]\r\n\r\nP_number1 = Button(900, 380, 15,15, (80,80,80), \"\")\r\nP_number1.clicked = True\r\nP_number2 = Button(920, 380, 15,15, (80,80,80), \"\")\r\nP_Buttons = [P_number1, P_number2]\r\n\r\nfileFont = pg.font.SysFont(None, 30)\r\nnameSurface = pg.Surface((370,40))\r\nnameSurface.fill(pg.Color(\"White\"))\r\nfileName = \"unnamed\"\r\n\r\nselectedTool = 0\r\nselectedToolBefore = 0\r\n\r\ncolorUsing = [128, 30, 30]\r\nselectedColor = [128,30,30]\r\nclicking = False\r\npenSize = 3\r\neraserSize = 3\r\n\r\nround = -1\r\nclock = pg.time.Clock()\r\nholdingCTRL = False\r\nundoed = False\r\nmouseRelPosX = 0\r\nmouseRelPosY = 0\r\n\r\npositions1 = []\r\npositions2 = []\r\nvisitedFillPositions = []\r\n\r\ni = 0\r\nj = 0\r\nfor color in colorCells1:\r\n positions1.append((784 + i * 20, 405 + j * 25))\r\n i += 1\r\n if i >= 8:\r\n i = 0\r\n j += 1\r\n\r\ni = 0\r\nj = 0\r\nfor color in colorCells2:\r\n positions2.append((789 + i * 25, 430 + j * 55))\r\n i += 1\r\n if i >= 6:\r\n i = 0\r\n j += 1\r\n\r\n\r\ndef draw_palette(scheme):\r\n screen.blit(colorTitle, (779, 380))\r\n pg.draw.rect(screen, (200, 200, 200), (779, 400, 170, 350))\r\n\r\n if scheme == 1:\r\n for i, color in enumerate(colorCells1):\r\n screen.blit(color.subsurface, positions1[i])\r\n elif scheme == 2:\r\n for i, color in enumerate(colorCells2):\r\n screen.blit(color.subsurface, positions2[i])\r\n\r\n for t, text in enumerate(colorTexts):\r\n screen.blit(text, (positions2[t * 6][0], positions2[t * 6][1] - 18))\r\n\r\n pg.draw.rect(screen, (235, 235, 235), (positions1[-1][0] - 82, positions1[-1][1] + 27, 35, 35))\r\n pg.draw.rect(screen, colorUsing, (positions1[-1][0] - 76, positions1[-1][1] + 33, 23, 23))\r\n\r\n\r\ndef paint(var):\r\n global mouseRelPosX, mouseRelPosY\r\n if var == 0:\r\n sizeToDraw = penSize\r\n elif var == 1:\r\n sizeToDraw = eraserSize\r\n\r\n\r\n if sizeToDraw == 1:\r\n mouseRelPosX = max(penSize - 1, min(g1.xCount - 1, int(Remap(0, (g1.cellSize * g1.xCount), 0, g1.xCount, pg.mouse.get_pos()[0]))))\r\n mouseRelPosY = max(penSize - 1, min(g1.yCount - 1, int(Remap(0, (g1.cellSize * g1.yCount), 0, g1.yCount, pg.mouse.get_pos()[1]))))\r\n g1.change_color(mouseRelPosY, mouseRelPosX, colorUsing)\r\n if sizeToDraw == 2:\r\n mouseRelPosX = max(penSize - 1, min(g1.xCount - 2, int(Remap(0, (g1.cellSize * g1.xCount), 0, g1.xCount, pg.mouse.get_pos()[0]))))\r\n mouseRelPosY = max(penSize - 1, min(g1.yCount - 2, int(Remap(0, (g1.cellSize * g1.yCount), 0, g1.yCount, pg.mouse.get_pos()[1]))))\r\n g1.change_color(mouseRelPosY, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX, colorUsing)\r\n if sizeToDraw == 3:\r\n mouseRelPosX = max(penSize - 2, min(g1.xCount - 2, int(Remap(0, 
(g1.cellSize * g1.xCount), 0, g1.xCount, pg.mouse.get_pos()[0]))))\r\n mouseRelPosY = max(penSize - 2, min(g1.yCount - 2, int(Remap(0, (g1.cellSize * g1.yCount), 0, g1.yCount, pg.mouse.get_pos()[1]))))\r\n g1.change_color(mouseRelPosY, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX - 1, colorUsing)\r\n if sizeToDraw == 4:\r\n mouseRelPosX = max(penSize - 2, min(g1.xCount - 3, int(Remap(0, (g1.cellSize * g1.xCount), 0, g1.xCount, pg.mouse.get_pos()[0]))))\r\n mouseRelPosY = max(penSize - 2, min(g1.yCount - 3, int(Remap(0, (g1.cellSize * g1.yCount), 0, g1.yCount, pg.mouse.get_pos()[1]))))\r\n g1.change_color(mouseRelPosY, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX - 2, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX + 2, colorUsing)\r\n g1.change_color(mouseRelPosY + 2, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY - 2, mouseRelPosX, colorUsing)\r\n if sizeToDraw == 5:\r\n mouseRelPosX = max(penSize - 3, min(g1.xCount - 3, int(Remap(0, (g1.cellSize * g1.xCount), 0, g1.xCount, pg.mouse.get_pos()[0]))))\r\n mouseRelPosY = max(penSize - 3, min(g1.yCount - 3, int(Remap(0, (g1.cellSize * g1.yCount), 0, g1.yCount, pg.mouse.get_pos()[1]))))\r\n g1.change_color(mouseRelPosY, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX - 2, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX - 2, colorUsing)\r\n g1.change_color(mouseRelPosY + 2, mouseRelPosX - 2, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX - 2, colorUsing)\r\n g1.change_color(mouseRelPosY - 2, mouseRelPosX - 2, colorUsing)\r\n g1.change_color(mouseRelPosY, mouseRelPosX + 2, colorUsing)\r\n g1.change_color(mouseRelPosY + 1, mouseRelPosX + 2, colorUsing)\r\n g1.change_color(mouseRelPosY + 2, mouseRelPosX + 2, colorUsing)\r\n g1.change_color(mouseRelPosY - 1, mouseRelPosX + 2, colorUsing)\r\n g1.change_color(mouseRelPosY - 2, mouseRelPosX + 2, colorUsing)\r\n g1.change_color(mouseRelPosY + 2, mouseRelPosX, colorUsing)\r\n 
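Each brush size around this point is painted by a hand-enumerated block of change_color calls. The same stamp shapes (1, 5, 9, 13 and 21 cells) can be generated from a squared-distance test around the cursor; the threshold table below is inferred from those branches, so this is a sketch rather than the program's actual code:

THRESHOLD = {1: 1, 2: 2, 3: 3, 4: 5, 5: 8}  # dx*dx + dy*dy must stay below this

def brush_offsets(size):
    r = size // 2 + 1  # widest offset that can appear for this size
    return [(dx, dy)
            for dx in range(-r, r + 1)
            for dy in range(-r, r + 1)
            if dx * dx + dy * dy < THRESHOLD[size]]

for size in (1, 2, 3, 4, 5):
    print(size, len(brush_offsets(size)))  # -> 1, 5, 9, 13, 21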
g1.change_color(mouseRelPosY + 2, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY + 2, mouseRelPosX + 1, colorUsing)\r\n g1.change_color(mouseRelPosY - 2, mouseRelPosX, colorUsing)\r\n g1.change_color(mouseRelPosY - 2, mouseRelPosX - 1, colorUsing)\r\n g1.change_color(mouseRelPosY - 2, mouseRelPosX + 1, colorUsing)\r\n\r\n\r\ndef neighbours(x,y):\r\n return [[x+1,y],[x-1,y],[x,y+1],[x,y-1]]\r\n\r\n\r\ndef fill(gridObject, posX, posY, colorNow, fillColor):\r\n\r\n if posX < 0 or posY < 0:\r\n\r\n return\r\n if posX >= gridObject.xCount or posY >= gridObject.yCount:\r\n\r\n return\r\n if gridObject.grid[posX][posY].color != colorNow:\r\n return\r\n if [posX, posY] in visitedFillPositions:\r\n return\r\n\r\n\r\n visitedFillPositions.append([posX, posY])\r\n gridObject.change_color(posY, posX, fillColor)\r\n moves = neighbours(posX, posY)\r\n for move in moves:\r\n fill(gridObject, move[0], move[1], colorNow, fillColor)\r\n\r\n\r\ndef tool_activate(toolIndex):\r\n global colorUsing\r\n if toolIndex == 0:\r\n colorUsing = selectedColor.copy()\r\n if toolIndex == 1:\r\n colorUsing = g1.color.copy()\r\n if toolIndex == 2:\r\n colorUsing = selectedColor.copy()\r\n if toolIndex == 3:\r\n colorUsing = selectedColor.copy()\r\n\r\n\r\ndef Capture(display,name,pos,size): # (pygame Surface, String, tuple, tuple)\r\n image = pg.Surface(size) # Create image surface\r\n image.blit(display,(0,0),(pos,size)) # Blit portion of the display to the image\r\n pg.image.save(image,name) # Save the image to the disk\r\n\r\n\r\ndef FileManager(var):\r\n global fileExtension\r\n window = Tk()\r\n window.withdraw()\r\n if var != 2:\r\n availableFormats = [(\"Windows Text File\", \"*.txt\")]\r\n else:\r\n availableFormats = [(\"Portable Network Graphics\", \"*.png\")]\r\n\r\n if var == 0:\r\n filename = askopenfilename(title=\"Open File\", filetypes=availableFormats)\r\n elif var == 1:\r\n filename = asksaveasfilename(title=\"Save File\", filetypes=availableFormats)\r\n elif var == 2:\r\n filename = asksaveasfilename(title=\"Export File\", filetypes=availableFormats)\r\n\r\n if filename:\r\n name = filename[:]\r\n return name\r\n\r\n\r\ndef SaveFile(gridObject, filePath):\r\n global fileName\r\n\r\n if filePath:\r\n if len(filePath) >= 4: # This just makes sure we have .txt at the end of our file selection\r\n if filePath[-4:] != '.txt':\r\n filePath = filePath + '.txt'\r\n else:\r\n filePath = filePath + '.txt'\r\n\r\n file = open(filePath, \"w\")\r\n\r\n for row in range(len(gridObject.grid)):\r\n for pixel in gridObject.grid[row]:\r\n colorVal = str(pixel.color[0]) + \",\" + str(pixel.color[1]) + \",\" + str(pixel.color[2])\r\n file.write(colorVal + \"\\n\")\r\n\r\n file.close()\r\n\r\n filePathList = filePath.split(\"/\")\r\n fileName = filePathList[-1]\r\n pg.display.set_caption(\"Pyint by Burak - \" + fileName)\r\n\r\n\r\ndef OpenFile(filePath):\r\n global g1, fileName\r\n\r\n if filePath:\r\n file = open(filePath, \"r\")\r\n colors = file.readlines()\r\n\r\n\r\n line = 0\r\n for i in range(g1.yCount):\r\n for j in range(g1.xCount):\r\n colorVal = []\r\n colorVal = colors[line].split(\",\")\r\n finalColorVal = [int(colorVal[0]),int(colorVal[1]),int(colorVal[2])]\r\n print(colorVal, \"\\n\", finalColorVal)\r\n line+=1\r\n g1.change_color(j,i,finalColorVal)\r\n\r\n file.close()\r\n filePathList = filePath.split(\"/\")\r\n fileName = filePathList[-1]\r\n pg.display.set_caption(\"Pyint by Burak - \" + fileName)\r\n\r\ndef key_event_up(event):\r\n global penSize, undoed, holdingCTRL, colorScheme, 
selectedTool\r\n\r\n\r\n if event.key == pg.K_1:\r\n colorScheme = 1\r\n elif event.key == pg.K_2:\r\n colorScheme = 2\r\n\r\n if event.key == pg.K_e:\r\n selectedTool = 1\r\n B_Buttons[1].clicked = True\r\n for subbutton in B_Buttons:\r\n if B_Buttons.index(subbutton) != selectedTool:\r\n subbutton.clicked = False\r\n elif event.key == pg.K_b:\r\n selectedTool = 0\r\n B_Buttons[0].clicked = True\r\n for subbutton in B_Buttons:\r\n if B_Buttons.index(subbutton) != selectedTool:\r\n subbutton.clicked = False\r\n elif event.key == pg.K_g:\r\n selectedTool = 2\r\n B_Buttons[2].clicked = True\r\n for subbutton in B_Buttons:\r\n if B_Buttons.index(subbutton) != selectedTool:\r\n subbutton.clicked = False\r\n elif event.key == pg.K_i:\r\n selectedTool = 3\r\n B_Buttons[3].clicked = True\r\n for subbutton in B_Buttons:\r\n if B_Buttons.index(subbutton) != selectedTool:\r\n subbutton.clicked = False\r\n\r\n if event.key == pg.K_LCTRL:\r\n holdingCTRL = False\r\n\r\n if event.key == pg.K_SPACE:\r\n if holdingCTRL:\r\n g1.clean()\r\n undoed = True\r\n\r\n if event.key == pg.K_s:\r\n if holdingCTRL:\r\n shortcutPath = FileManager(1)\r\n SaveFile(g1, shortcutPath)\r\n\r\n if event.key == pg.K_z:\r\n if holdingCTRL:\r\n\r\n for i in range(g1.yCount):\r\n for j in range(g1.xCount):\r\n if round == 1:\r\n g1.change_color(j, i, g1.undoList[1][i][j])\r\n if round == -1:\r\n g1.change_color(j, i, g1.undoList[0][i][j])\r\n undoed = True\r\n\r\n\r\nwhile True:\r\n clock.tick(240)\r\n\r\n if undoed:\r\n for i in range(g1.xCount):\r\n for j in range(g1.yCount):\r\n g1.undoList[0][i][j] = g1.grid[i][j].color\r\n g1.undoList[1][i][j] = g1.grid[i][j].color\r\n undoed = False\r\n\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n sys.exit()\r\n\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n if event.button == 3:\r\n selectedToolBefore = selectedTool\r\n selectedTool = 1\r\n elif event.button == 1:\r\n if pg.mouse.get_pos()[0] < g1.xCount*g1.cellSize and pg.mouse.get_pos()[1] < g1.yCount*g1.cellSize:\r\n if selectedTool == 0 or selectedTool == 1:\r\n paint(selectedTool)\r\n clicking = True\r\n elif selectedTool == 2:\r\n mouseRelPosX = max(0, min(g1.xCount - 1, int(Remap(0, (g1.cellSize * g1.xCount), 0, g1.xCount, pg.mouse.get_pos()[0]))))\r\n mouseRelPosY = max(0, min(g1.yCount - 1, int(Remap(0, (g1.cellSize * g1.yCount), 0, g1.yCount, pg.mouse.get_pos()[1]))))\r\n\r\n visitedFillPositions.clear()\r\n\r\n fill(g1, mouseRelPosX, mouseRelPosY, g1.grid[mouseRelPosX][mouseRelPosY].color, selectedColor)\r\n elif selectedTool == 3:\r\n mouseRelPosX = max(0, min(g1.xCount - 1, int(Remap(0, (g1.cellSize * g1.xCount), 0, g1.xCount, pg.mouse.get_pos()[0]))))\r\n mouseRelPosY = max(0, min(g1.yCount - 1, int(Remap(0, (g1.cellSize * g1.yCount), 0, g1.yCount, pg.mouse.get_pos()[1]))))\r\n\r\n selectedColor = g1.grid[mouseRelPosX][mouseRelPosY].color\r\n\r\n else:\r\n if colorScheme == 1:\r\n for i, Scolor in enumerate(colorCells1):\r\n if Scolor.subsurface.get_rect(topleft=positions1[i]).collidepoint(pg.mouse.get_pos()):\r\n selectedColor = Scolor.color\r\n elif colorScheme == 2:\r\n for i, Scolor in enumerate(colorCells2):\r\n if Scolor.subsurface.get_rect(topleft=positions2[i]).collidepoint(pg.mouse.get_pos()):\r\n selectedColor = Scolor.color\r\n for but in S_buttons:\r\n if but.subsurface.get_rect(topleft=(but.pos[0]-but.width/2, but.pos[1])).collidepoint(pg.mouse.get_pos()):\r\n but.active = True\r\n else:\r\n but.active = False\r\n for i,but in enumerate(SL_Buttons):\r\n if but.rollOver:\r\n if 
i == 0:\r\n cPath = FileManager(1)\r\n SaveFile(g1, cPath)\r\n elif i == 1:\r\n cPath = FileManager(0)\r\n OpenFile(cPath)\r\n elif i == 2:\r\n cPath = FileManager(2)\r\n if cPath:\r\n Capture(screen, cPath + \".png\", (4,4), (764,760))\r\n fileName = cPath.split(\"/\")[-1] + \".png\"\r\n\r\n for but in B_Buttons:\r\n if but.rollOver:\r\n but.clicked = True\r\n selectedTool = B_Buttons.index(but)\r\n for subbutton in B_Buttons:\r\n if B_Buttons.index(subbutton) != selectedTool:\r\n subbutton.clicked = False\r\n for but in P_Buttons:\r\n if but.rollOver:\r\n but.clicked = True\r\n colorScheme = P_Buttons.index(but)+1\r\n for subbutton in P_Buttons:\r\n if P_Buttons.index(subbutton) != selectedTool:\r\n subbutton.clicked = False\r\n\r\n if event.type == pg.MOUSEBUTTONUP:\r\n if event.button == 3:\r\n selectedTool = selectedToolBefore\r\n elif event.button == 1:\r\n for i in range(g1.xCount):\r\n for j in range(g1.yCount):\r\n if round == -1:\r\n g1.undoList[0][i][j] = g1.grid[i][j].color\r\n if round == 1:\r\n g1.undoList[1][i][j] = g1.grid[i][j].color\r\n round *= -1\r\n clicking = False\r\n\r\n for but in S_buttons:\r\n but.active = False\r\n\r\n if event.type == pg.MOUSEMOTION:\r\n if pg.mouse.get_pos()[0] < g1.xCount * g1.cellSize and pg.mouse.get_pos()[1] < g1.yCount * g1.cellSize:\r\n pg.mouse.set_visible(False)\r\n else:\r\n pass\r\n pg.mouse.set_visible(True)\r\n if clicking:\r\n if pg.mouse.get_pos()[0] < g1.xCount * g1.cellSize and pg.mouse.get_pos()[1] < g1.yCount * g1.cellSize:\r\n paint(selectedTool)\r\n else:\r\n for but in SL_Buttons:\r\n if but.subsurface.get_rect(topleft=but.pos).collidepoint(pg.mouse.get_pos()):\r\n but.rollOver = True\r\n else:\r\n but.rollOver = False\r\n for but in B_Buttons:\r\n if but.subsurface.get_rect(topleft=but.pos).collidepoint(pg.mouse.get_pos()):\r\n but.rollOver = True\r\n else:\r\n but.rollOver = False\r\n for but in S_buttons:\r\n if but.active:\r\n but.pos[0] = max(but.drawPos[0]-60, min(pg.mouse.get_pos()[0], but.drawPos[0]+60))\r\n else:\r\n but.active = False\r\n for but in P_Buttons:\r\n if but.subsurface.get_rect(topleft=but.pos).collidepoint(pg.mouse.get_pos()):\r\n but.rollOver = True\r\n else:\r\n but.rollOver = False\r\n\r\n if event.type == pg.KEYDOWN:\r\n if event.key == pg.K_LCTRL:\r\n holdingCTRL = True\r\n\r\n if event.type == pg.KEYUP:\r\n key_event_up(event)\r\n\r\n tool_activate(selectedTool)\r\n\r\n screen.fill((255, 255, 255))\r\n g1.Draw(screen)\r\n draw_walls()\r\n \r\n for but in SL_Buttons:\r\n but.Draw(screen)\r\n\r\n screen.blit(colorTitleFont.render(\"Tools\", True, (50,50,50)), (779, 30))\r\n pg.draw.rect(screen, (180,180,180), (779, 50, 170, 100))\r\n for but in B_Buttons:\r\n but.Draw(screen)\r\n\r\n for but in P_Buttons:\r\n but.Draw(screen)\r\n\r\n screen.blit(colorTitleFont.render(\"Size Settings\", True, (50, 50, 50)), (779, 170))\r\n S_brushSize.Draw(screen, penSize)\r\n S_eraserSize.Draw(screen, eraserSize)\r\n penSize = int(S_brushSize.slideVal)\r\n eraserSize = int(S_eraserSize.slideVal)\r\n\r\n screen.blit(nameSurface, (310, 790))\r\n nameText = fileFont.render(fileName, True, (0, 0, 0))\r\n screen.blit(nameText, (320,sh-50))\r\n screen.blit(logo, (680, 775))\r\n\r\n screen.blit(pg.transform.scale(fillImage, (22,22)), (B_fillTool.pos[0]+3, B_fillTool.pos[1]+2))\r\n screen.blit(pg.transform.scale(eraserImage, (22,22)), (B_eraserTool.pos[0]+3, B_eraserTool.pos[1]+2))\r\n screen.blit(pg.transform.scale(brushImage, (22,22)), (B_penTool.pos[0]+3, B_penTool.pos[1]+2))\r\n 
screen.blit(pg.transform.scale(dropperImage, (22,22)), (B_eyeDropper.pos[0]+3, B_eyeDropper.pos[1]+2))\r\n\r\n draw_palette(colorScheme)\r\n\r\n if selectedTool == 0:\r\n if pg.mouse.get_pos()[0] < g1.xCount * g1.cellSize and pg.mouse.get_pos()[1] < g1.yCount * g1.cellSize:\r\n pg.draw.circle(screen, colorUsing, (pg.mouse.get_pos()), penSize * 8, 1)\r\n elif selectedTool == 1:\r\n if pg.mouse.get_pos()[0] < g1.xCount * g1.cellSize and pg.mouse.get_pos()[1] < g1.yCount * g1.cellSize:\r\n pg.draw.circle(screen, (50,50,50), (pg.mouse.get_pos()), eraserSize * 8, 1)\r\n elif selectedTool == 2:\r\n if pg.mouse.get_pos()[0] < g1.xCount * g1.cellSize and pg.mouse.get_pos()[1] < g1.yCount * g1.cellSize:\r\n screen.blit(fillImage, (pg.mouse.get_pos()[0], pg.mouse.get_pos()[1]-35))\r\n elif selectedTool == 3:\r\n if pg.mouse.get_pos()[0] < g1.xCount * g1.cellSize and pg.mouse.get_pos()[1] < g1.yCount * g1.cellSize:\r\n screen.blit(dropperImage, (pg.mouse.get_pos()[0], pg.mouse.get_pos()[1]-30))\r\n\r\n\r\n\r\n pg.display.update()\r\n","repo_name":"Burakcoli/Pyint_Pixel-Painter","sub_path":"pyint.py","file_name":"pyint.py","file_ext":"py","file_size_in_byte":30316,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"81"} +{"seq_id":"35186942521","text":"#\n# @lc app=leetcode.cn id=2325 lang=python3\n#\n# [2325] 解密消息\n#\n\n# @lc code=start\nclass Solution:\n def decodeMessage(self, key: str, message: str) -> str:\n ktable, kidx = {' ': ' '}, ord('a') \n for i in key:\n if not i in ktable:\n ktable[i] = chr(kidx)\n kidx += 1\n return \"\".join(ktable[i] for i in message) \n \n# @lc code=end\n\nprint(Solution().decodeMessage(key = \"eljuxhpwnyrdgtqkviszcfmabo\", message = \"zwx hnfx lqantp mnoeius ycgk vcnjrdb\"))","repo_name":"HellOwhatAs/Leetcode","sub_path":"2325.解密消息.py","file_name":"2325.解密消息.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29110856601","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport re\nimport time\n\ncom_rep = ' '\n\ndef source_code(char):\n if char == '/':\n return comment_begin, ''\n return source_code, char\n\ndef comment_begin(char):\n if char == '/':\n return inline_comment, 2*com_rep\n if char == '*':\n return block_comment, 2*com_rep\n return source_code, '/'+char\n\ndef inline_comment(char):\n if char == '\\n':\n return source_code, '\\n'\n return inline_comment, com_rep\n\ndef block_comment(char):\n if char == '*':\n return end_block_comment, com_rep\n if(char == '\\n'):\n return block_comment, '\\n'\n return block_comment, com_rep\n\ndef end_block_comment(char):\n if char == '/':\n return source_code, com_rep\n if(char == '\\n'):\n return block_comment, '\\n'\n return block_comment, com_rep\n\ndef remove_comments(content):\n def gen_content():\n parser = source_code\n for character in content:\n parser, text = parser(character)\n yield text\n\n return ''.join(gen_content())\n\ndef col_regions(content, com_rem):\n p = re.compile(\"/\\*[lr][wypg]\\*/\");#len 6\n col_stack = ['w']\n exist = {'w':True, 'y':False, 'p':False, 'g':False}\n res = {'w':\"\", 'y':\"\", 'p':\"\", 'g':\"\"}\n while len(content) > 0:\n if p.match(content):\n if content[2] == 'r':\n if content[3] != col_stack[-1]:\n sys.stderr.write(f'ERROR in coloring\\n')\n else:\n col_stack.pop()\n else:\n col_stack.append(content[3])\n exist[content[3]] = True\n content = content[6:]\n com_rem = com_rem[6:]\n else:\n c = com_rem[0]\n content = content[1:]\n 
com_rem = com_rem[1:]\n res['g'] += c\n if col_stack[-1] == 'g' and c != '\\n':\n c = ' ' \n res['p'] += c \n if col_stack[-1] == 'p' and c != '\\n':\n c = ' ' \n res['y'] += c \n if col_stack[-1] == 'y' and c != '\\n':\n c = ' '\n res['w'] += c\n fin_res = {}\n for elem in exist.items():\n if elem[1]:\n fin_res[elem[0]] = res[elem[0]]\n return fin_res\n\ndef rem_col_codes(content):\n p = re.compile(\"/\\*[lr][wypg]\\*/\");#len 6\n res = \"\"\n while len(content) > 0:\n if p.match(content):\n content = content[6:]\n else:\n res += content[0]\n content = content[1:]\n return res\n\n\ndef calc_hash(content):\n fout = open(\"cksum.in\", \"w\")\n fout.write(content)\n fout.close()\n os.system(\"bash crc.sh cksum.in 999 999 | tail -1 >cksum.out\")\n fin = open(\"cksum.out\", \"r\")\n res = fin.readline().rstrip()\n fin.close()\n return res\n\ndef add_hashes(line, act_line, hashes):\n hash_room_left = (47-len(act_line))//5\n line_end = \"\"\n added = []\n for elem in hashes:\n col, id, hash = elem\n if hash_room_left > 0:\n hash_room_left -= 1\n added.append(hash)\n line_end += \"/*l\"+col+\"*/\"\n line_end += id+hash\n line_end += \"/*r\"+col+\"*/\"\n if(len(added) > 0):\n line = line.rstrip()\n line += '/*lw*/'+(57-len(act_line)-5*len(added))*' '+'/*rw*/'+line_end\n return (line, added)\n\ndef process(content, col_hash = True):\n com_rem = remove_comments('\\n'.join(content)).split('\\n')\n act_out = rem_col_codes('\\n'.join(content)).split('\\n')\n col_versions = {}\n for elem in col_regions('\\n'.join(content), '\\n'.join(com_rem)).items():\n col_versions[elem[0]] = elem[1].split('\\n')\n keys = []\n for key in \"wypg\":\n if key in col_versions:\n keys.append(key)\n if not col_hash:\n keys = keys[-1:]\n since_hash = [0 for key in keys]\n res = \"\"\n for i in range(len(content)-1):\n hashes = [calc_hash('\\n'.join(col_versions[key][:i+1])) for key in keys]\n for j in range(len(keys)):\n since_hash[j] += len(''.join(col_versions[keys[j]][i].split()))\n write_hash = [False for key in keys]\n chars_between = 70\n for j in range(len(keys)-1, -1, -1):\n if(since_hash[j] >= chars_between):\n write_hash[j] = True\n if(j != 0 and write_hash[j] and hashes[j] == hashes[j-1]):\n write_hash[j] = False\n write_hash[j-1] = True \n hash_querys = []\n for j in range(len(keys)):\n if(write_hash[j]):\n if col_hash:\n hash_querys.append((keys[j], ' ', hashes[j]))\n else:\n hash_querys.append((\"w\", ' ', hashes[j]))\n content[i], written_hashes = add_hashes(content[i], act_out[i], hash_querys)\n for j in range(len(keys)):\n if hashes[j] in written_hashes:\n since_hash[j] = 0\n hash_querys = []\n for j in range(len(keys)):\n if col_hash:\n hash_querys.append((keys[j], '%', hashes[j]))\n else:\n hash_querys.append((\"w\", '%', hashes[j]))\n content[-1], written_hashes = add_hashes(content[-1], act_out[-1], hash_querys)\n return content\n\n\nfin = open(\"file_ord.txt\", \"r\")\nfile_ord = fin.readlines()\nfin.close()\nwith open('header.html', 'r') as file:\n full_html = file.read()\nfull_tex = \"\"\n\nfor file in file_ord:\n print(file)\n file = file.rstrip()\n \n lang = \"tex\"\n if(file[-4:] == \".cpp\" or file[-4:] == \".hpp\" or file[-2:] == \".c\" or file[-2:] == \".h\" ):\n lang = \"c++\"\n elif(file[-3:] == \".sh\" ):\n lang = \"bash\"\n else:\n continue\n outputting = False\n hashing = False\n escape_str = \"!escape \"\n beg_str = \"!begin_codebook\"\n end_str = \"!end_codebook\"\n beg_hash_str = \"//!start\"\n end_hash_str = \"//!finish\"\n no_col_hash_str = \"//!no_col_hash\"\n pause_str = 
\"//!pause\"\n unpause_str = \"//!unpause\"\n loc_html_beg_str = '
    '\n    loc_html_end_str = \"
    \\n\"\n col_hash = True\n\n\n processed = \"\"\n portion = []\n fin = open(file, \"r\")\n for line in fin.readlines():\n line = line.rstrip()\n escape_idx = line.find(escape_str)\n beg_idx = line.find(beg_str)\n end_idx = line.find(end_str)\n hash_beg_idx = line.find(beg_hash_str)\n hash_end_idx = line.find(end_hash_str)\n no_col_hash_idx = line.find(no_col_hash_str)\n pause_idx = line.find(pause_str)\n unpause_idx = line.find(unpause_str)\n if(escape_idx != -1):\n line = line[escape_idx+len(escape_str):]\n full_html += '
    \\n'\n full_html += line\n full_html += '\\n
    \\n'\n elif(beg_idx != -1):\n outputting = True\n elif(end_idx != -1):\n outputting = False\n if len(portion) > 0:\n processed += '\\n'.join(portion)+'\\n'\n portion = []\n elif(hash_beg_idx != -1):\n hashing = True\n elif(hash_end_idx != -1):\n hashing = False\n if len(portion) > 0:\n processed += '\\n'.join(process(portion, col_hash))+'\\n'\n portion = []\n elif(no_col_hash_idx != -1):\n col_hash = False\n elif(pause_idx != -1):\n outputting = False\n elif(unpause_idx != -1):\n outputting = True;\n elif(outputting and not line.isspace() and len(line) > 0):\n portion.append(line)\n fout = open(\"tmp_source.\"+lang, \"w\");\n fout.write(processed)\n fout.close()\n os.system(\"pygmentize -O full,style=tartu_icpc -o tmp_source.html tmp_source.\"+lang)\n with open('tmp_source.html', 'r') as file:\n loc_html = file.read()\n loc_html_beg = loc_html.find(loc_html_beg_str)+len(loc_html_beg_str)\n loc_html_end = loc_html.find(loc_html_end_str)\n full_html += loc_html[loc_html_beg:loc_html_end]\n\n\nwith open('footer.html', 'r') as file:\n full_html += file.read()\nfout = open(\"codebookpart2.html\", \"w\")\nfout.write(full_html)\nfout.close()\n","repo_name":"quartzcream/tartu_icpc","sub_path":"create_codebook2.py","file_name":"create_codebook2.py","file_ext":"py","file_size_in_byte":8084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31380285952","text":"def FuncName(x,y):\n z=x+y\n return z\n\ndef isPalindrome(mystr):\n str_len_half=len(mystr)//2\n for i in range(0,(str_len_half)):\n j=len(mystr)-i-1\n if mystr[i]!=mystr[j]:\n return False\n return True\n\nmyname=\"ABcBcBA\"\nmyname_isPalindrome=isPalindrome(myname)\nprint(myname_isPalindrome)\nmyname2=\"sdkjfadsf\"\nprint(isPalindrome(myname2))\n","repo_name":"ohad1s/Intro_to_Python","sub_path":"SemA/tirgul_6/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16493984579","text":"from random import randint, choice as rc\n\nfrom faker import Faker\n\nfrom app import app\nfrom models import db, Planet, Scientist, Mission\n\nfake = Faker()\n\n\ndef create_planets():\n planets = []\n for _ in range(20):\n p = Planet(\n name=fake.first_name(),\n distance_from_earth=str(randint(100000, 10000000000)),\n nearest_star=fake.first_name(),\n )\n planets.append(p)\n\n return planets\n\n\ndef create_scientists():\n scientists = []\n names = []\n for _ in range(5):\n name = fake.name()\n while name in names:\n name = fake.name()\n names.append(name)\n\n s = Scientist(\n name=name,\n field_of_study=fake.sentence(),\n )\n scientists.append(s)\n\n return scientists\n\n\ndef create_missions(planets, scientists):\n missions = []\n for _ in range(20):\n m = Mission(\n name=fake.sentence(nb_words=3),\n planet_id=rc(planets).id,\n scientist_id=rc(scientists).id\n )\n missions.append(m)\n return missions\n\n\nif __name__ == '__main__':\n\n with app.app_context():\n print(\"Clearing db...\")\n Planet.query.delete()\n Scientist.query.delete()\n Mission.query.delete()\n\n print(\"Seeding planets...\")\n planets = create_planets()\n db.session.add_all(planets)\n db.session.commit()\n\n print(\"Seeding scientists...\")\n scientists = create_scientists()\n db.session.add_all(scientists)\n db.session.commit()\n\n print(\"Seeding missions...\")\n missions = create_missions(planets, scientists)\n db.session.add_all(missions)\n db.session.commit()\n\n print(\"Done 
seeding!\")\n","repo_name":"morgvanny/WEST-SE-073123-Lectures","sub_path":"phase-4/07-python-p4-mock-challenge-cosmic-challenge/server/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37146943950","text":"#LOGISTIC REGRESSION\n\nfrom PIL import Image\nimport numpy as np\nimport sys\nimport os\nimport csv\nimport pandas as pd\nfrom sys import argv\nfrom scipy.linalg import eigh\n\n#making of train data\npath=sys.argv[1]\nfp =open(path,\"r\")\nfilelist=[]\nlabels=[]\nfor line in fp:\n a=line.split(\" \")\n filelist.append(a[0]) \n labels.append(a[1].rstrip(\"\\n\"))\n\nimageset=[]\nlabels_name=dict()\nit=0\ny_train=[]\nfor filepath in filelist:\n im1 = Image.open(filepath)\n newsize = (64,64) \n im1 = im1.resize(newsize) \n # Make image Greyscale\n im1 = im1.convert('L')\n # Save Greyscale values\n value = np.asarray(im1.getdata(), dtype=np.int)\n value = value.flatten()\n imageset.append(value)\n t=filepath.split('/')\n p=t[-1].split('_')\n y_train.append(int(p[0]))\n labels_name[int(p[0])]=labels[it]\n it+=1\n\n#making of test data\npath=sys.argv[2]\nfp =open(path,\"r\")\nfilelist1=[]\nfor line in fp:\n filelist1.append(line.rstrip(\"\\n\")) \n\nimageset1=[]\ny_test=[]\nfor filepath in filelist1:\n im1 = Image.open(filepath)\n newsize = (64,64) \n im1 = im1.resize(newsize) \n# Make image Greyscale\n im1 = im1.convert('L')\n# Save Greyscale values\n value = np.asarray(im1.getdata(), dtype=np.int)\n value = value.flatten()\n imageset1.append(value)\n t=filepath.split('/')\n p=t[-1].split('_')\n y_test.append(int(p[0]))\n\n#converting into numpy array\ntrain=np.asarray(imageset)\n\n# print(len(imageset1))\ntest=np.asarray(imageset1)\n# print(test.shape)\n\n#Normalising Data using Standard Scaler\nfrom sklearn.preprocessing import StandardScaler\nX_train=StandardScaler().fit_transform(train)\nX_test=StandardScaler().fit_transform(test)\n\n#PCA Function\n\ndef PCA(A,K):\n covarMat=np.cov(A.T)\n values, vectors=eigh(covarMat)\n idx= np.argsort(values)\n idx = idx[::-1]\n eigvec = vectors[:,idx]\n eigval = values[idx]\n subeigvec=eigvec[:,:K]\n newdata=np.dot(A,subeigvec)\n return newdata\n\n#Applying PCA\nK=400\nX_train=PCA(X_train,K)\nX_test=PCA(X_test,K)\n#converting into numpy array\nX_train=np.array(X_train)\nX_test=np.array(X_test)\n#appending column of 1's in train data\nli_Train=[1.0]*(X_train.shape[0])\nli_Train=np.array(li_Train)\nli_Train.shape=(X_train.shape[0],1)\n# print(li_Train.shape,X_train.shape)\nX_train= np.hstack((li_Train,X_train))\n#appending column of 1's in test data\nli_Test=[1.0]*(X_test.shape[0])\nli_Test=np.array(li_Test)\nli_Test.shape=(X_test.shape[0],1)\nX_test= np.hstack((li_Test,X_test))\n\ndef logisticfun(z):\n return 1.0 / (1 + np.exp(-z))\n\ndef fit(parameters,X_train,y_train):\n alpha=0.02\n for j in range(0,1000):\n z=np.dot(X_train,parameters)\n pred_list=logisticfun(z)\n predlist=np.asarray(pred_list)\n predlist=np.subtract(predlist,y_train)\n df1=X_train.transpose()\n intermediate= np.dot(df1,predlist)\n for i in range(0,401):\n intermediate[i]=intermediate[i]*(1/y_train.shape[0])\n intermediate*=alpha\n parameters=np.subtract(parameters,intermediate)\n return parameters\n\n#creating parameters list\nparameters = np.zeros(X_train.shape[1])\nparameters=np.expand_dims(parameters, axis=1)\n\n#Training model and finding parameters for multiclass classification\nRparameters=[]\ny_train = np.asarray(y_train)\n# 
print(y_train.shape)\nfor i in range(0,8):\n    temp = y_train.copy() # copy labels so the one-vs-rest binarization doesn't overwrite y_train\n    for j in range(0,X_train.shape[0]):\n        if(y_train[j]==i):\n            temp[j]=1\n        else:\n            temp[j]=0\n    temp = np.asarray(temp)    \n    temp.shape=(y_train.shape[0],1)\n    Rparameters.append(fit(parameters,X_train,temp))\n\n#Returning predicted list for Xtest\ndef predict_prob(X_test,Rparameters):\n    problist=[]\n    for k in range(0,X_test.shape[0]):\n        temp=[]\n        for i in range(0,8):\n            temp.append(logisticfun(np.dot(X_test[k,:],Rparameters[i])))\n        # print(temp)\n        maxpos = temp.index(max(temp))\n        # print(maxpos)\n        problist.append(maxpos)\n    return problist\n\nt_predlist=predict_prob(X_test,Rparameters)\noutput=[]\n\nfor i in range(len(t_predlist)):\n    output.append(labels_name[t_predlist[i]])\n\nfor b in output:\n    print(b)\n\n\n\n\n\n\n","repo_name":"Aditya-crypto/LogisticRegression-From-Scratch","sub_path":"logistic regression.py","file_name":"logistic regression.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"26524811825","text":"\"\"\"\ninternal > setup.py\n\nThis script helps with initialising the script, and setting up the event detector object.\n\nAuthor: Miguel Guthridge\n\"\"\"\n\nimport ui\n\nfrom .. import consts\nfrom ..windowstate import window\n\nimport helpers\nimport eventconsts\nimport deviceconfig\nimport config\n\nimport device\n\nclass InitState:\n    state = consts.INIT_INCOMPLETE\n\n    def getVal(self):\n        return self.state\n    \n    def setVal(self, new_val):\n        if self.state == consts.INIT_SETUP and new_val == consts.INIT_SUCCESS:\n            print(\"\")\n            print(\"Setup complete!\")\n            print(\"\")\n        self.state = new_val\n\n    def __eq__(self, other):\n        return self.state == other\n    \ninitState = InitState()\n\nclass Learner:\n    current = [0, 0]\n    \n    def setCurrent(self, new_type, new_control, message, can_skip=False):\n        self.current = (new_type, new_control)\n        print(\"\")\n        print(message)\n        if can_skip:\n            print(\"If your controller doesn't have this, press the stop button\")\n    \n    def __getitem__(self, key):\n        return self.current[key]\n\nlearn = Learner()\n\ndef initialise():\n    \n    device_name = device.getName()\n    \n    print(helpers.getLineBreak())\n    print(consts.SCRIPT_NAME)\n    print(\"By \" + consts.SCRIPT_AUTHOR)\n    print(helpers.getLineBreak())\n    print(\"Version \" + str(consts.SCRIPT_VERSION_MAJOR) + \".\" + str(consts.SCRIPT_VERSION_MINOR) + \".\" + str(consts.SCRIPT_VERSION_REVISION)\n          + \" \" + consts.SCRIPT_VERSION_SUFFIX)\n    print(helpers.getLineBreak())\n    print(\"Running on \\\"\" + device_name + \"\\\"\")\n    print(helpers.getLineBreak())\n    print(helpers.getLineBreak())\n    print(\"\")\n    \n    # Check for USE_GLOBAL_DEVICE_CONFIG flag\n    if config.USE_GLOBAL_DEVICE_CONFIG:\n        processInitMessage(None)\n        return\n    \n    # Send universal device inquiry\n    device.midiOutSysex(consts.DEVICE_INQUIRY_MESSAGE)\n    \n\ndef processInitMessage(command):\n    # Receives a universal device query response\n    \n    # If command isn't response to device inquiry\n    \n    if command is None:\n        device_id = None\n    else:\n        if command.type is not eventconsts.TYPE_SYSEX:\n            return\n        else:\n            device_id = command.sysex[5 : -5].hex()\n    \n    if device_id is None:\n        if config.USE_GLOBAL_DEVICE_CONFIG:\n            print(\"Device ID: [Not Requested]\")\n        else:\n            print(\"Device ID: [No Response]\")\n    else:\n        print(\"Device ID: \\\"\" + str(device_id) + \"\\\"\")\n    \n    \n    \n    # Import the configuration for the controller\n    result = deviceconfig.loadSetup(device_id)\n    \n    if result == 0:\n        print(\"An autoinit file could not 
be found for your device\")\n initState.setVal(consts.INIT_SETUP)\n \n elif result == -1:\n print(\"A compatible configuration was found but an error occurred while importing it\")\n initState.setVal(consts.INIT_FAIL)\n \n else:\n if result == 1:\n print(\"Device properties imported successfully\")\n if result == 2:\n print(\"Device properties loaded from global configuration\")\n initState.setVal(consts.INIT_SUCCESS)\n \n print(helpers.getLineBreak())\n print(\"\")\n \n if initState == consts.INIT_SETUP:\n learn.setCurrent(eventconsts.TYPE_TRANSPORT, eventconsts.CONTROL_PLAY, \n \"To begin manual setup, press the play button on your controller\")\n \n\ndef processSetup(command):\n command.addProcessor(\"Setup processor\")\n if learn[0] == eventconsts.TYPE_TRANSPORT:\n transport.setupTransport(command)\n elif learn[0] == eventconsts.TYPE_JOG:\n jog.setupJog(command)\n elif learn[0] == eventconsts.TYPE_FADER:\n fader.setupFader(command)\n elif learn[0] == eventconsts.TYPE_FADER_BUTTON:\n fader.setupFaderButton(command)\n elif learn[0] == eventconsts.TYPE_SOLO_BUTTON:\n fader.setupSoloButton(command)\n elif learn[0] == eventconsts.TYPE_KNOB:\n fader.setupKnob(command)\n elif learn[0] == eventconsts.TYPE_DRUM_PAD:\n drumpad.setupDrums(command)\n elif learn[0] == consts.INIT_SUCCESS:\n offerPrintout(command)\n \n command.refreshId()\n \n if not command.handled:\n command.handle(\"Setup catch-all\")\n\ndef offerPrintout(command):\n if command.getId() == (eventconsts.TYPE_TRANSPORT, eventconsts.CONTROL_PLAY) and command.is_lift:\n detector.dumpAutoinitScript()\n initState.setVal(consts.INIT_SUCCESS)\n command.handle(\"Dump autoinit script, finished initialisation\")\n elif command.getId() == (eventconsts.TYPE_TRANSPORT, eventconsts.CONTROL_STOP) and command.is_lift:\n command.handle(\"Finished initialisation\")\n initState.setVal(consts.INIT_SUCCESS)\n\ndef idleSetup():\n # Set hint message before setup begins\n if learn[0] is eventconsts.TYPE_TRANSPORT and learn[1] is eventconsts.CONTROL_STOP:\n ui.setHintMsg(\"Navigate to \\\"View > Script output\\\" to set up your controller\")\n\ndef idleInit():\n \n # Check for device ID timeout\n if window.getAbsoluteTick() > consts.INIT_TIMEOUT:\n print(\"Note:\")\n print(\"The linked device didn't respond to the universal device inquiry message within the timeout time.\")\n print(\"This device will load from the global device properties\")\n \n processInitMessage(None)\n\nfrom . 
import transport, jog, fader, drumpad\n\nfrom ..parse import detector\n","repo_name":"MiguelGuthridge/Legacy-Universal-Controller-Script","sub_path":"internal/setup/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"}
{"seq_id":"36461453764","text":"import time\nimport os\nimport shutil\nimport numpy as np\nfrom model.model_config import *\nfrom model.model import *\nfrom utils.misc import print_config, print_flags, get_num_para, print_variable\nfrom dataset import reader, reader_v2\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string(\"data_path\", 'data', \"Where the training/test data is stored.\")\ntf.flags.DEFINE_string(\"save_path\", None, \"Model output directory.\")\ntf.flags.DEFINE_boolean(\"debug\", False, \"whether in debug mode or not.\")\ntf.flags.DEFINE_boolean(\"allow_growth\", True, \"GPU allow_growth.\")\ntf.flags.DEFINE_float(\"gpu_memory_fraction\", 1, \"gpu_memory_fraction.\")\n\ntf.flags.DEFINE_boolean(\"larger_hidden_size\", False, \"whether increase the number of hidden units given sparsity.\")\n\n\ndef main(unused_argv):\n    if not FLAGS.data_path:\n        raise ValueError(\"Must set --data_path to PTB data directory\")\n    if FLAGS.debug:\n        print(\"########## Debug Mode ##########\")\n        if os.path.exists(FLAGS.save_path):\n            print(\"Remove previous model cache\")\n            shutil.rmtree(FLAGS.save_path)\n    if not FLAGS.save_path:\n        raise ValueError(\"save_path need to be specified\")\n    if not os.path.exists(FLAGS.save_path):\n        print(\"Create save directory {}\".format(FLAGS.save_path))\n        os.makedirs(FLAGS.save_path)\n\n    raw_data = reader.ptb_raw_data(FLAGS.data_path) # numericalized data: text --> integer\n    train_data, valid_data, test_data, _ = raw_data\n\n    # raw_data = reader_v2.ptb_raw_data_sentence(FLAGS.data_path) # numericalized data: text --> integer\n    # train_data, valid_data, test_data, _ = raw_data\n\n\n\n    config = get_config()\n    eval_config = get_eval_config()\n    if FLAGS.larger_hidden_size and FLAGS.model_type == \"sparse\":\n        config.hidden_size = int(np.square(int(np.sqrt(config.hidden_size / np.sqrt(FLAGS.sparsity)))))\n        eval_config.hidden_size = config.hidden_size\n        print(\"Increase Hidden Size: {}\".format(config.hidden_size))\n    print_config(config)\n    print_flags(FLAGS)\n\n    with tf.Graph().as_default():\n        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)\n\n        with tf.name_scope(\"Train\"):\n            train_input = PTBInput(config=config, data=train_data, name=\"TrainInput\")\n            with tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n                print(\"Create train Model!\")\n                m = PTBModel(is_training=True, config=config, input_=train_input)\n            tf.summary.scalar(\"Training Loss\", m.cost)\n            tf.summary.scalar(\"Learning Rate\", m.lr)\n\n        with tf.name_scope(\"Valid\"):\n            valid_input = PTBInput(config=config, data=valid_data, name=\"ValidInput\")\n            with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n                print(\"Create valid Model!\")\n                mvalid = PTBModel(is_training=False, config=config, input_=valid_input)\n            tf.summary.scalar(\"Validation Loss\", mvalid.cost)\n\n        with tf.name_scope(\"Test\"):\n            test_input = PTBInput(config=eval_config, data=test_data, name=\"TestInput\")\n            with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n                print(\"Create test Model!\")\n                mtest = PTBModel(is_training=False, config=eval_config, input_=test_input)\n\n        print(\"####### Total Number of Parameters: 
{}\".format(get_num_para()))\n print_variable()\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction,\n allow_growth=FLAGS.allow_growth)\n session_config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)\n\n sv = tf.train.Supervisor(logdir=FLAGS.save_path)\n with sv.managed_session(config=session_config) as session:\n for i in range(config.max_max_epoch):\n lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)\n m.assign_lr(session, config.learning_rate * lr_decay)\n\n print(\"Epoch: %d Learning rate: %.3f\" % (i + 1, session.run(m.lr)))\n train_perplexity = run_epoch(session, m, eval_op=m.train_op,\n verbose=True)\n print(\"Epoch: %d Train Perplexity: %.3f\" % (i + 1, train_perplexity))\n valid_perplexity = run_epoch(session, mvalid)\n print(\"Epoch: %d Valid Perplexity: %.3f\" % (i + 1, valid_perplexity))\n\n test_perplexity = run_epoch(session, mtest)\n print(\"Test Perplexity: %.3f\" % test_perplexity)\n\n if FLAGS.save_path:\n print(\"Saving model to %s.\" % FLAGS.save_path)\n sv.saver.save(session, os.path.join(os.getcwd(), FLAGS.save_path) + \"/model\",\n global_step=sv.global_step)\n\n\ndef run_epoch(session, model, eval_op=None, verbose=False):\n \"\"\"Runs the model on the given data.\"\"\"\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n\n fetches = {\n \"cost\": model.cost,\n \"final_state\": model.final_state,\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n\n for step in range(model.input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),\n iters * model.input.batch_size / (time.time() - start_time)))\n\n return np.exp(costs / iters)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n # python ptb_word_knet.py --data_path data --model large --useKnetOutput --useKnet\n # python main.py --data_path data --model_size small --model_type baseline --optimizer adam\n # python main.py --data_path data --model_size small --model_type sparse --optimizer adam --debug --save_path temp\n\n # baseline: 816 637 3893200 561 513; 449 429 speed:12.9k\n # k_out: 840 647 3293200 571 525; 454 424 speed:12.6k\n # 0.01 sparse: 1931 767 442000 773 763 767 760\n\n # Largest 3000 hidden speed: 2.1k 120M\n # baseline: 563 372; 276 287;\n # knet: 624 433; 317 307; 212 262; 160 240\n","repo_name":"mie324/SparseLang","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11161081091","text":"import praw\nimport time\n\n# Initialize the Reddit API client\nreddit = praw.Reddit(client_id='',\n client_secret='',\n username='',\n password='',\n user_agent='')\n\n# Choose the subreddit where you want to post the message\nsubreddit = reddit.subreddit('subreddit')\n\n# Read the contents of the message file.\nwith open('./message.txt', 'r') as f:\n message_template = f.read()\n posted_count = 0\n\nplaceholder = 'example'\nplaceholder_list = ['example 1',\n 'example 2',\n 'example 3',\n 'example 4',\n 'example 5']\n\nfor p in 
placeholder_list:\n    message = message_template.replace(\"[PLACEHOLDER]\", p)\n    posted_count = posted_count + 1\n    print(str(posted_count) + \"). \" + p + \" was posted.\")\n    # title for your post; selftext is the message body (edit message.txt to change it)\n    subreddit.submit(title=p + 'xyz title', selftext=message)\n    # Pause for 1 minute (60 seconds) before posting the next message\n    time.sleep(60)\n","repo_name":"d6x/reddit-post-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"43210120553","text":"from bs4 import BeautifulSoup as soup\nfrom urllib.request import Request, urlopen\nimport pprint\n\nmy_url = 'http://www.safecosmetics.org/get-the-facts/chem-of-concern/'\nreq = Request(my_url, headers = {'User-Agent': 'Mozilla/5.0'})\n\nweb_byte = urlopen(req).read()\npage_html = web_byte.decode('utf-8')\n\n\n# html parsing\npage_soup = soup(page_html, 'lxml')\n\n# Grab all headers\ndef headers():\n    headers = []\n    for header in page_soup.find_all('h2'):\n        headers.append(header.text.strip())\n    return headers\n\n# Grab all the links of chemicals and put them into 'links'\n\ndef links():\n    links = []\n    for href in page_soup.find_all('h2'):\n        try:\n            link = href.find('a')\n            links.append(link.get('href'))\n        except:\n            continue\n    return links\n\nif __name__ == '__main__':\n    headers()\n    links()\n\n\n\n\n\n\n\n","repo_name":"aryajayadevkm/hackathon","sub_path":"chemicals_of_concern.py","file_name":"chemicals_of_concern.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"74466787783","text":"import flask\r\nfrom flask import request,jsonify,send_from_directory,send_file, redirect\r\nimport numpy as np\r\n# importing Qiskit\r\nfrom qiskit import BasicAer, IBMQ\r\nfrom qiskit import QuantumCircuit, assemble, execute,ClassicalRegister\r\n# import basic plot tools\r\nfrom qiskit.providers.ibmq import least_busy\r\nfrom qiskit.tools.monitor import job_monitor\r\nfrom PIL import Image\r\nimport io\r\nimport json\r\nimport base64\r\nfrom qiskit.circuit import qpy_serialization\r\nfrom qiskit.aqua.components.oracles import TruthTableOracle\r\nimport operator\r\nfrom flask_swagger_ui import get_swaggerui_blueprint\r\n\r\napp = flask.Flask(__name__)\r\napp.config['DEBUG'] = True\r\n\r\n@app.route('/static/<path:path>')\r\ndef send_static(path):\r\n    return send_from_directory('/static', path)\r\n\r\n\r\nswagger_url = '/home'\r\nAPI_url = '/static/bv_api_new.json'\r\nswagger_ui_blueprint = get_swaggerui_blueprint(swagger_url,API_url,config={'app_name':'QuLib'})\r\napp.register_blueprint(swagger_ui_blueprint, url_prefix=swagger_url)\r\n\r\n@app.route('/',methods=['GET'])\r\ndef index():\r\n    return redirect('/home')\r\n\r\n@app.route('/demo/get_BV_oracle',methods=['GET'])\r\ndef build_BV_oracle():\r\n    if 'key' in request.args:\r\n        key = request.args['key']\r\n        n = len(key)\r\n    elif 'qubits' in request.args:\r\n        n = int(request.args['qubits'])\r\n        key = np.random.randint(0, pow(2, n) - 1)\r\n        key = format(key, '0' + str(n) + 'b')\r\n    else:\r\n        return jsonify({'ERROR': 'Cannot specify key bitstring'})\r\n    oracle = QuantumCircuit(n + 1, n)\r\n    for i in range(n):\r\n        oracle.h(i)\r\n    oracle.x(n)\r\n    oracle.h(n)\r\n    oracle.barrier()\r\n    for i, v in enumerate(key):\r\n        if v == '1':\r\n            oracle.cx(i, n)\r\n    oracle.barrier()\r\n    for i in range(n):\r\n        oracle.h(i)\r\n        oracle.measure(i, i)\r\n    buf 
= io.BytesIO()\r\n qpy_serialization.dump(oracle, buf)\r\n oracle.draw(output='mpl').savefig('circuit_img.png')\r\n response = send_file('circuit_img.png',mimetype='image/png')\r\n response.headers['oracle']=base64.b64encode(buf.getvalue()).decode('utf8')\r\n response.headers['key'] = key\r\n # pil_img = Image.open('circuit_img.png',mode='r')\r\n # byte_arr = io.BytesIO()\r\n # pil_img.save(byte_arr,format='PNG')\r\n # enc_img = base64.encodebytes(byte_arr.getvalue()).decode('ascii')\r\n # json_str = json.dumps({\r\n # 'oracle': base64.b64encode(buf.getvalue()).decode('utf8'),\r\n # 'key': key,\r\n # 'img': send_file('circuit_img.png',mimetype='image/gif')\r\n # })\r\n return response\r\n\r\n\r\n@app.route('/demo/get_BV_key',methods=['GET'])\r\ndef get_key_():\r\n if 'oracle' in request.args:\r\n circuit_json = request.args['oracle']\r\n qpy_file = io.BytesIO(base64.b64decode(circuit_json))\r\n circuit = qpy_serialization.load(qpy_file)[0]\r\n else:\r\n return jsonify({'ERROR': 'No Oracle circuit found.'})\r\n simulator = BasicAer.get_backend('qasm_simulator')\r\n job = execute(circuit, simulator, shots=1, memory=True)\r\n result = job.result()\r\n measurement = result.get_memory()[0]\r\n measurement = measurement[::-1]\r\n return jsonify({'key': measurement})\r\n\r\n\r\n@app.route('/BVazirani',methods=['GET'])\r\ndef apply_bv():\r\n if 'bitmap' in request.args:\r\n bitmap = request.args['bitmap']\r\n n = int(np.log2(len(bitmap)))\r\n if len(bitmap) != pow(2, n):\r\n return jsonify({'ERROR': 'bitmap length should be in powers of 2.'})\r\n else:\r\n return jsonify({'ERROR': 'Please provide bitmap.'})\r\n if 'api_key' in request.args:\r\n API_KEY = request.args['api_key']\r\n else:\r\n return jsonify({'ERROR': 'No IBM-Q API key found.'})\r\n\r\n oracle = TruthTableOracle(bitmap, optimization=True, mct_mode='noancilla')\r\n superpos = QuantumCircuit(oracle.variable_register, oracle.output_register)\r\n superpos.h(oracle.variable_register)\r\n superpos.x(oracle.output_register)\r\n superpos.h(oracle.output_register)\r\n circuit = oracle.construct_circuit()\r\n circuit = circuit.compose(superpos, front=True)\r\n desup = QuantumCircuit(oracle.variable_register, oracle.output_register)\r\n desup.h(oracle.variable_register)\r\n circuit = circuit.compose(desup)\r\n msr = ClassicalRegister(oracle.variable_register.size)\r\n circuit.add_register(msr)\r\n circuit.measure(oracle.variable_register, msr)\r\n\r\n IBMQ.enable_account(API_KEY)\r\n provider = IBMQ.get_provider('ibm-q')\r\n backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= (n + 1) and\r\n not x.configuration().simulator and\r\n x.status().operational == True))\r\n # backend = provider.get_backend('ibmq_5_yorktown')\r\n # simulator = BasicAer.get_backend('qasm_simulator')\r\n job = execute(circuit, backend, shots=1024)\r\n job_monitor(job)\r\n result = job.result()\r\n noisy_keys = result.get_counts()\r\n key = max(noisy_keys.items(), key=operator.itemgetter(1))[0]\r\n IBMQ.disable_account()\r\n return jsonify({'key': key})\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","repo_name":"decoyindus/qulib-BVZR","sub_path":"b_vazir.py","file_name":"b_vazir.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39211820134","text":"import argparse\nimport pickle\nimport sys\nimport yaml\nimport logging\nimport utils\nimport time\nimport glob2\nimport os\nimport numpy as np\nfrom model import Model\nimport 
hyperparameter_tuning\nimport visualize\nimport pprint\n\n\n# define command line args\nparser = argparse.ArgumentParser(prog=('python %s' % sys.argv[0]), description='Build an image classification model.')\noptional_args = parser._action_groups.pop()\nrequired_args = parser.add_argument_group('required arguments')\nparser._action_groups.append(optional_args)\n\noptional_args.add_argument('-v', help='Verbose output (debug)', action='store_true')\noptional_args.add_argument('-e', type=str, help='The output location of cross validation errors for each config, for analysis after training is complete', default='', metavar='ERROR_INFO_FP')\noptional_args.add_argument('-store-all', help='Store all models (default: store only the best models)', action='store_true')\noptional_args.add_argument('-visualize', help='Create visualizations of model components and/or parameters', default='', metavar='VISUALIZE_OPTIONS')\noptional_args.add_argument('-save-vis', help='Save visualizations to file', action='store_true')\n\nrequired_args.add_argument('-train', type=str, help='The location of the training snippets', required=True, metavar='TRAIN_DIR')\nrequired_args.add_argument('-test', type=str, help='The location of the testing snippets', required=True, metavar='TEST_DIR')\nrequired_args.add_argument('-c', type=str, help='The location of the config file with the model parameters', required=True, metavar='CONFIG')\nrequired_args.add_argument('-m', type=str, help='The output location of the created model', required=True, metavar='MODEL_OUTPUT_FP')\n\n\n# load command line args\nargs = parser.parse_args(sys.argv[1:] if len(sys.argv) > 1 else ['-h'])\ntrain_dir = args.train\ntest_dir = args.test\nconfig_fp = args.c\nmodel_output_fp = args.m\nverbose = args.v\nstore_all = args.store_all\nerror_info_fp = args.e\nvisualize_params = args.visualize\nsave_vis = args.save_vis\n\n# set logger, start stopwatch\nlogging_level = logging.DEBUG if verbose else logging.INFO\nlogging.basicConfig(stream=sys.stdout, level=logging_level)\nsw = utils.Stopwatch(); sw.start()\n\n# load model config\nlogging.info('Loading model config...')\nwith open(config_fp, 'rt') as config_file:\n    model_args = yaml.load(config_file)\n\n# import training data\nlogging.info('Loading train data...')\ntrain_im_fps = glob2.glob(os.path.join(train_dir, '**/snippets/**/*.png'))\ntrain_ims = utils.import_images(train_im_fps)\ntrain_im_labels = utils.get_labels_from_fps(train_im_fps)\n\n# import testing data\nlogging.info('Loading test data...')\ntest_im_fps = glob2.glob(os.path.join(test_dir, '**/snippets/**/*.png'))\ntest_ims = utils.import_images(test_im_fps)\ntest_im_labels = utils.get_labels_from_fps(test_im_fps)\n\ndef build_model(model_config, X, y):\n    logging.info('Building new model...')\n    model = Model(**model_config)\n\n    logging.info('Building BOVW...')\n    model.BOVW_create(X, y)\n\n    logging.info('Training model...')\n    model.train(X, y)\n\n    return model\n\ndef cross_val_model(model_config, X, y):\n    n_folds = model_config['n_folds']\n    logging.info('Running k-fold cross-validation on model (k=%d)...' 
% n_folds)\n\n cross_val_errors = []\n for j, ((X_cv_train, y_cv_train), (X_cv_test, y_cv_test)) \\\n in enumerate(hyperparameter_tuning.get_folds(X, y, n_splits=n_folds)):\n\n print(); logging.info('Fold #%d of %d' % (j+1, n_folds))\n\n model = build_model(model_config, X_cv_train, y_cv_train)\n _, val_error = test_model(model, X_cv_test, y_cv_test, test_type='Validation fold')\n cross_val_errors.append(val_error)\n\n avg_cross_val_error = utils.compute_average(cross_val_errors)\n print(); logging.info('Average cross validation error: %g\\n' % avg_cross_val_error)\n return avg_cross_val_error\n\ndef test_model(model, X, y, test_type='Test'):\n logging.info('Testing model...')\n predictions = model.predict(X)\n err = utils.compute_error(y, predictions)\n acc = 1 - err\n num_total = len(y)\n num_correct = int(round(acc * num_total))\n num_incorrect = num_total - num_correct\n logging.info('%s accuracy: %g (%d/%d)' % (test_type, acc, num_correct, num_total))\n logging.info('%s error: %g (%d/%d)' % (test_type, err, num_incorrect, num_total))\n return acc, err\n\n# \nmodel_configs = hyperparameter_tuning.get_model_configs(model_args)\nall_models = hyperparameter_tuning.create_all_models(model_configs, build_model, cross_val_model, test_model, train_ims, train_im_labels, test_ims, test_im_labels)\n\nbest_models = hyperparameter_tuning.find_best_models(all_models)\nbest_test_acc = best_models[0][3]\n\n#\nprint('\\n\\n'); logging.info('Saving models...')\nwith open(model_output_fp, 'w+b') as model_output_file:\n\n if store_all:\n pickle.dump(all_models, model_output_file)\n logging.info('All %d model(s) and train+test statistics saved to \\'%s\\'.' % (len(model_configs), model_output_fp))\n\n else:\n pickle.dump(best_models, model_output_file)\n logging.info('%d/%d model(s) (test_acc=%g) and train+test statistics saved to \\'%s\\'.' % (len(best_models), len(model_configs), best_test_acc, model_output_fp))\n logging.info('Random best model config:\\n%s' % pprint.pformat(best_models[0][1]))\n\n#\nsw.stop()\nprint(); logging.info('Model creation took %s.' 
% sw.format_str())\n\n\nif visualize_params:\n logging.info('Creating visualizations...')\n visualize.create_visualizations(all_models, visualize_params, save_vis, model_output_fp)\n\n","repo_name":"josephcappadona/automated-CV-analytics","sub_path":"src/model/build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":5554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28927277724","text":"def to_int(bin): # from definition\r\n n = 0\r\n for i, b in enumerate(reversed(bin)):\r\n print(i,b)\r\n if b == '1':\r\n if i != (len(bin)-1):\r\n n += 2**i\r\n else: # MSB\r\n n -= 2**i\r\n return n\r\n\r\n\r\nprint(to_int('1111110100101'))\r\ndef bin2dec(b):\r\n number = 0\r\n counter = 0\r\n for t in b[::-1]: # Iterating through b in reverse order\r\n number += int(t)*(2**counter)\r\n counter += 1\r\n\r\n return number","repo_name":"b21627868/drafts","sub_path":"uzaylı2.py","file_name":"uzaylı2.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8059672491","text":"#from controlpanel import Ui_MainWindow\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QWidget\nIO_GUI_HEIGHT = 180\nclass display(QWidget):\n \n def __init__(self, centralwidget):\n super().__init__(centralwidget)\n \n self.DisplayToolbox = QtWidgets.QLabel(centralwidget)\n self.DisplayToolbox.setGeometry(QtCore.QRect(10, IO_GUI_HEIGHT, 200, 30))\n font = QtGui.QFont()\n font.setFamily(\".Farah PUA\")\n font.setPointSize(24)\n font.setBold(False)\n font.setItalic(True)\n font.setWeight(50)\n font.setKerning(False)\n self.DisplayToolbox.setFont(font)\n self.DisplayToolbox.setObjectName(\"AnalysisLbl\")\n\n self.ColSpinBox = QtWidgets.QSpinBox(self)\n self.ColSpinBox.setGeometry(QtCore.QRect(450, 530+ IO_GUI_HEIGHT, 48, 21))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.ColSpinBox.setFont(font)\n self.ColSpinBox.setObjectName(\"ColSpinBox\")\n \n self.viewer = PhotoViewer(self)\n \n self.ApplyMaxMin = QtWidgets.QPushButton(self)\n self.ApplyMaxMin.setGeometry(QtCore.QRect(440, 130 + IO_GUI_HEIGHT, 71, 32))\n self.ApplyMaxMin.setObjectName(\"ApplyMaxMin\")\n self.HistogramsView = QtWidgets.QGraphicsView(self)\n self.HistogramsView.setGeometry(QtCore.QRect(10, 30 + IO_GUI_HEIGHT, 231, 101))\n self.HistogramsView.setObjectName(\"HistogramsView\")\n self.ResetHistogram = QtWidgets.QPushButton(self)\n self.ResetHistogram.setGeometry(QtCore.QRect(370, 130 + IO_GUI_HEIGHT, 71, 32))\n self.ResetHistogram.setObjectName(\"ResetHistogram\")\n self.MaxHistSpinBox = QtWidgets.QSpinBox(self)\n self.MaxHistSpinBox.setGeometry(QtCore.QRect(470, 100 + IO_GUI_HEIGHT, 48, 24))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.MaxHistSpinBox.setFont(font)\n self.MaxHistSpinBox.setObjectName(\"MaxHistSpinBox\")\n self.MaxHistSlider = QtWidgets.QSlider(self)\n self.MaxHistSlider.setGeometry(QtCore.QRect(280, 100 + IO_GUI_HEIGHT, 191, 22))\n self.MaxHistSlider.setOrientation(QtCore.Qt.Horizontal)\n self.MaxHistSlider.setObjectName(\"MaxHistSlider\")\n self.MaxHistLbl = QtWidgets.QLabel(self)\n self.MaxHistLbl.setGeometry(QtCore.QRect(250, 100 + IO_GUI_HEIGHT, 31, 21))\n self.MaxHistLbl.setObjectName(\"MaxHistLbl\")\n self.MinHistSpinBox = QtWidgets.QSpinBox(self)\n self.MinHistSpinBox.setGeometry(QtCore.QRect(470, 70 + IO_GUI_HEIGHT, 48, 24))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.MinHistSpinBox.setFont(font)\n 
self.MinHistSpinBox.setObjectName(\"MinHistSpinBox\")\n self.MinHistLbl = QtWidgets.QLabel(self)\n self.MinHistLbl.setGeometry(QtCore.QRect(250, 70 + IO_GUI_HEIGHT, 31, 21))\n self.MinHistLbl.setObjectName(\"MinHistLbl\")\n self.MinHistSlider = QtWidgets.QSlider(self)\n self.MinHistSlider.setGeometry(QtCore.QRect(280, 70 + IO_GUI_HEIGHT, 191, 22))\n self.MinHistSlider.setOrientation(QtCore.Qt.Horizontal)\n self.MinHistSlider.setObjectName(\"MinHistSlider\")\n self.ColScroller = QtWidgets.QScrollBar(self)\n self.ColScroller.setGeometry(QtCore.QRect(30, 530 + IO_GUI_HEIGHT, 411, 16))\n self.ColScroller.setOrientation(QtCore.Qt.Horizontal)\n self.ColScroller.setObjectName(\"ColScroller\")\n self.ZScroller = QtWidgets.QScrollBar(self)\n self.ZScroller.setGeometry(QtCore.QRect(30, 560 + IO_GUI_HEIGHT, 411, 16))\n self.ZScroller.setOrientation(QtCore.Qt.Horizontal)\n self.ZScroller.setObjectName(\"ZScroller\")\n self.ZSpinBox = QtWidgets.QSpinBox(self)\n self.ZSpinBox.setGeometry(QtCore.QRect(450, 560 + IO_GUI_HEIGHT, 48, 21))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.ZSpinBox.setFont(font)\n self.ZSpinBox.setObjectName(\"ZSpinBox\")\n self.TScroller = QtWidgets.QScrollBar(self)\n self.TScroller.setGeometry(QtCore.QRect(30, 590 + IO_GUI_HEIGHT, 411, 16))\n self.TScroller.setOrientation(QtCore.Qt.Horizontal)\n self.TScroller.setObjectName(\"TScroller\")\n self.TSpinBox = QtWidgets.QSpinBox(self)\n self.TSpinBox.setGeometry(QtCore.QRect(450, 590 + IO_GUI_HEIGHT, 48, 20))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.TSpinBox.setFont(font)\n self.TSpinBox.setObjectName(\"TSpinBox\")\n self.ColLabel = QtWidgets.QLabel(self)\n self.ColLabel.setGeometry(QtCore.QRect(5, 530 + IO_GUI_HEIGHT, 21, 20))\n self.ColLabel.setObjectName(\"ColLabel\")\n self.ZLabel = QtWidgets.QLabel(self)\n self.ZLabel.setGeometry(QtCore.QRect(10, 560 + IO_GUI_HEIGHT, 16, 16))\n self.ZLabel.setObjectName(\"ZLabel\")\n self.TLabel = QtWidgets.QLabel(self)\n self.TLabel.setGeometry(QtCore.QRect(10, 590 + IO_GUI_HEIGHT, 16, 16))\n self.TLabel.setObjectName(\"TLabel\")\n self.RowScroller = QtWidgets.QScrollBar(self)\n self.RowScroller.setGeometry(QtCore.QRect(10, 170 + IO_GUI_HEIGHT, 16, 351))\n self.RowScroller.setOrientation(QtCore.Qt.Vertical)\n self.RowScroller.setObjectName(\"RowScroller\")\n self.RowSpinBox = QtWidgets.QSpinBox(self)\n self.RowSpinBox.setGeometry(QtCore.QRect(0, 146 + IO_GUI_HEIGHT, 41, 20))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.RowSpinBox.setFont(font)\n self.RowSpinBox.setObjectName(\"RowSpinBox\")\n self.RowLabel = QtWidgets.QLabel(self)\n self.RowLabel.setGeometry(QtCore.QRect(0, 130 + IO_GUI_HEIGHT, 31, 21))\n self.RowLabel.setObjectName(\"RowLabel\")\n self.HistAutoAdjust = QtWidgets.QPushButton(self)\n self.HistAutoAdjust.setGeometry(QtCore.QRect(300, 130 + IO_GUI_HEIGHT, 71, 32))\n self.HistAutoAdjust.setObjectName(\"HistAutoAdjust\")\n self.Ch1CheckBox = QtWidgets.QCheckBox(self)\n self.Ch1CheckBox.setGeometry(QtCore.QRect(450, 170 + IO_GUI_HEIGHT, 51, 20))\n self.Ch1CheckBox.setObjectName(\"Ch1CheckBox\")\n self.Ch1CheckBox.setStyleSheet(\"color: gray\")\n self.Ch2CheckBox = QtWidgets.QCheckBox(self)\n self.Ch2CheckBox.setGeometry(QtCore.QRect(450, 190 + IO_GUI_HEIGHT, 51, 20))\n self.Ch2CheckBox.setObjectName(\"Ch2CheckBox\")\n self.Ch2CheckBox.setStyleSheet(\"color: red\")\n self.Ch3CheckBox = QtWidgets.QCheckBox(self)\n self.Ch3CheckBox.setGeometry(QtCore.QRect(450, 210 + IO_GUI_HEIGHT, 51, 20))\n self.Ch3CheckBox.setObjectName(\"Ch3CheckBox\")\n 
self.Ch3CheckBox.setStyleSheet(\"color: green\")\n self.Ch4CheckBox = QtWidgets.QCheckBox(self)\n self.Ch4CheckBox.setGeometry(QtCore.QRect(450, 230 + IO_GUI_HEIGHT, 51, 20))\n self.Ch4CheckBox.setObjectName(\"Ch4CheckBox\")\n self.Ch4CheckBox.setStyleSheet(\"color: blue\")\n self.Ch5CheckBox = QtWidgets.QCheckBox(self)\n self.Ch5CheckBox.setGeometry(QtCore.QRect(450, 250 + IO_GUI_HEIGHT, 51, 20))\n self.Ch5CheckBox.setObjectName(\"Ch5CheckBox\")\n self.Ch5CheckBox.setStyleSheet(\"color: orange\")\n \n self.HistChLabel = QtWidgets.QLabel(self)\n self.HistChLabel.setGeometry(QtCore.QRect(250, 30 + IO_GUI_HEIGHT, 61, 31))\n font = QtGui.QFont()\n font.setPointSize(14)\n self.HistChLabel.setFont(font)\n self.HistChLabel.setObjectName(\"HistChLabel\")\n self.HistChannel = QtWidgets.QComboBox(self)\n self.HistChannel.setGeometry(QtCore.QRect(310, 30 + IO_GUI_HEIGHT, 71, 31))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.HistChannel.setFont(font)\n self.HistChannel.setObjectName(\"HistChannel\")\n self.HistChannel.addItem(\"Ch 1\")\n self.HistChannel.addItem(\"Ch 2\")\n self.HistChannel.addItem(\"Ch 3\")\n self.HistChannel.addItem(\"Ch 4\")\n self.FOVScroller = QtWidgets.QScrollBar(self)\n self.FOVScroller.setGeometry(QtCore.QRect(80, 140 + IO_GUI_HEIGHT, 141, 16))\n self.FOVScroller.setOrientation(QtCore.Qt.Horizontal)\n self.FOVScroller.setObjectName(\"FOVScroller\")\n self.FOVSpinBox = QtWidgets.QSpinBox(self)\n self.FOVSpinBox.setGeometry(QtCore.QRect(230, 140 + IO_GUI_HEIGHT, 41, 21))\n font = QtGui.QFont()\n font.setPointSize(11)\n self.FOVSpinBox.setFont(font)\n self.FOVSpinBox.setObjectName(\"FOVSpinBox\")\n self.FOVLabel = QtWidgets.QLabel(self)\n self.FOVLabel.setGeometry(QtCore.QRect(50, 140 + IO_GUI_HEIGHT, 31, 20))\n self.FOVLabel.setObjectName(\"FOVLabel\")\n \n self.NuclMaskCheckBox = QtWidgets.QCheckBox(self)\n self.NuclMaskCheckBox.setGeometry(QtCore.QRect(5, 615 + IO_GUI_HEIGHT, 70, 20))\n self.NuclMaskCheckBox.setObjectName(\"NucMaskCheckBox\")\n self.NuclMaskCheckBox.setStyleSheet(\"color: red\")\n self.NucPreviewMethod = QtWidgets.QComboBox(self)\n self.NucPreviewMethod.setGeometry(QtCore.QRect(75, 610 + IO_GUI_HEIGHT, 110, 31))\n self.NucPreviewMethod.setObjectName(\"NucPreviewMethod\")\n self.NucPreviewMethod.addItem(\"Boundary\")\n self.NucPreviewMethod.addItem(\"Area\")\n \n self.SpotsCheckBox = QtWidgets.QCheckBox(self)\n self.SpotsCheckBox.setGeometry(QtCore.QRect(205, 615 + IO_GUI_HEIGHT, 60, 20))\n self.SpotsCheckBox.setObjectName(\"SpotDetection\")\n self.SpotsCheckBox.setStyleSheet(\"color: green\")\n \n self.SpotPreviewMethod = QtWidgets.QComboBox(self)\n self.SpotPreviewMethod.setGeometry(QtCore.QRect(265, 610 + IO_GUI_HEIGHT, 80, 31))\n self.SpotPreviewMethod.setObjectName(\"SpotPreviewMethod\")\n self.SpotPreviewMethod.addItem(\"Dots\")\n self.SpotPreviewMethod.addItem(\"Cross\")\n \n self.CytoPreviewCheck = QtWidgets.QCheckBox(self)\n self.CytoPreviewCheck.setGeometry(QtCore.QRect(365, 610 + IO_GUI_HEIGHT, 45, 31))\n self.CytoPreviewCheck.setObjectName(\"CytoPreviewCheck\")\n self.CytoPreviewCheck.setStyleSheet(\"color: blue\")\n \n self.CytoDisplayMethod = QtWidgets.QComboBox(self)\n self.CytoDisplayMethod.setGeometry(QtCore.QRect(410, 610 + IO_GUI_HEIGHT, 110, 31))\n self.CytoDisplayMethod.setObjectName(\"CytoDisplayMethod\")\n self.CytoDisplayMethod.addItem(\"Boundary\")\n self.CytoDisplayMethod.addItem(\"Area\")\n \n \n _translate = QtCore.QCoreApplication.translate\n self.DisplayToolbox.setText(_translate(\"MainWindow\", \"Display Toolbox\"))\n 
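# The five per-channel checkboxes above are built by copy-paste; a drop-in
# sketch of the same setup as a loop, assuming it runs inside __init__
# (geometry offsets and colors are copied from the original statements):

self.channel_boxes = []
for idx, color in enumerate(["gray", "red", "green", "blue", "orange"]):
    box = QtWidgets.QCheckBox(self)
    box.setGeometry(QtCore.QRect(450, 170 + 20 * idx + IO_GUI_HEIGHT, 51, 20))
    box.setObjectName(f"Ch{idx + 1}CheckBox")
    box.setStyleSheet(f"color: {color}")
    self.channel_boxes.append(box)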
self.ApplyMaxMin.setText(_translate(\"MainWindow\", \"Apply\"))\n self.ResetHistogram.setText(_translate(\"MainWindow\", \"Reset\"))\n self.MaxHistLbl.setText(_translate(\"MainWindow\", \"Max\"))\n self.MinHistLbl.setText(_translate(\"MainWindow\", \"Min\"))\n self.ColLabel.setText(_translate(\"MainWindow\", \"Col\"))\n self.ZLabel.setText(_translate(\"MainWindow\", \"Z\"))\n self.TLabel.setText(_translate(\"MainWindow\", \"t\"))\n self.RowLabel.setText(_translate(\"MainWindow\", \"Row\"))\n self.HistAutoAdjust.setText(_translate(\"MainWindow\", \"Auto\"))\n self.Ch1CheckBox.setText(_translate(\"MainWindow\", \"Ch1\"))\n self.Ch2CheckBox.setText(_translate(\"MainWindow\", \"Ch2\"))\n self.Ch3CheckBox.setText(_translate(\"MainWindow\", \"Ch3\"))\n self.Ch4CheckBox.setText(_translate(\"MainWindow\", \"Ch4\"))\n self.Ch5CheckBox.setText(_translate(\"MainWindow\", \"Ch5\"))\n self.HistChLabel.setText(_translate(\"MainWindow\", \"Channel\"))\n self.HistChannel.setItemText(0, _translate(\"MainWindow\", \"Ch 1\"))\n self.HistChannel.setItemText(1, _translate(\"MainWindow\", \"Ch 2\"))\n self.HistChannel.setItemText(2, _translate(\"MainWindow\", \"Ch 3\"))\n self.HistChannel.setItemText(3, _translate(\"MainWindow\", \"Ch 4\"))\n self.FOVLabel.setText(_translate(\"MainWindow\", \"FOV\"))\n self.NuclMaskCheckBox.setText(_translate(\"MainWindow\", \"Nuclei\"))\n self.NucPreviewMethod.setItemText(0, _translate(\"MainWindow\", \"Boundary\"))\n self.NucPreviewMethod.setItemText(1, _translate(\"MainWindow\", \"Area\"))\n self.SpotsCheckBox.setText(_translate(\"MainWindow\", \"Spots\"))\n self.SpotPreviewMethod.setItemText(0, _translate(\"MainWindow\", \"Dots\"))\n self.SpotPreviewMethod.setItemText(1, _translate(\"MainWindow\", \"Cross\"))\n self.CytoPreviewCheck.setText(_translate(\"MainWindow\", \"Cell\"))\n self.CytoDisplayMethod.setItemText(0, _translate(\"MainWindow\", \"Boundary\"))\n self.CytoDisplayMethod.setItemText(1, _translate(\"MainWindow\", \"Area\"))\n\nclass PhotoViewer(QtWidgets.QGraphicsView):\n photoClicked = QtCore.pyqtSignal(QtCore.QPoint)\n\n def __init__(self, parent):\n super(PhotoViewer, self).__init__(parent)\n self._zoom = 0\n self._empty = True\n self.setGeometry(QtCore.QRect(30, 170+ IO_GUI_HEIGHT, 411, 350))\n self._scene = QtWidgets.QGraphicsScene(self)\n self._photo = QtWidgets.QGraphicsPixmapItem()\n self._scene.addItem(self._photo)\n self.setScene(self._scene)\n self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)\n self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setBackgroundBrush(QtGui.QBrush(QtGui.QColor(30, 30, 30)))\n self.setFrameShape(QtWidgets.QFrame.NoFrame)\n\n def hasPhoto(self):\n return not self._empty\n\n def fitInView(self, scale=True):\n rect = QtCore.QRectF(self._photo.pixmap().rect())\n if not rect.isNull():\n self.setSceneRect(rect)\n if self.hasPhoto():\n unity = self.transform().mapRect(QtCore.QRectF(0, 0, 1, 1))\n self.scale(1 / unity.width(), 1 / unity.height())\n viewrect = self.viewport().rect()\n scenerect = self.transform().mapRect(rect)\n factor = min(viewrect.width() / scenerect.width(),\n viewrect.height() / scenerect.height())\n self.scale(factor, factor)\n self._zoom = 0\n \n# def fitInView(self, scale=True):\n# rect = QtCore.QRectF(self._photo.pixmap().rect())\n \n# self.setSceneRect(rect)\n \n# unity = self.transform().mapRect(QtCore.QRectF(0, 0, 1, 1))\n# self.scale(1 / 
unity.width(), 1 / unity.height())\n# viewrect = self.viewport().rect()\n# scenerect = self.transform().mapRect(rect)\n# factor = min(viewrect.width() / scenerect.width(),\n# viewrect.height() / scenerect.height())\n# self.scale(factor, factor)\n# self._zoom = 0 \n \n\n def setPhoto(self, pixmap=None):\n self._zoom = 0\n if pixmap and not pixmap.isNull():\n self._empty = False\n self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)\n self._photo.setPixmap(pixmap)\n else:\n self._empty = True\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag)\n self._photo.setPixmap(QtGui.QPixmap())\n self.fitInView()\n \n\n def wheelEvent(self, event):\n if self.hasPhoto():\n if event.angleDelta().y() > 0:\n factor = 1.25\n self._zoom += 1\n else:\n factor = 0.8\n self._zoom -= 1\n if self._zoom > 0:\n self.scale(factor, factor)\n elif self._zoom == 0:\n self.fitInView()\n else:\n self._zoom = 0\n\n def toggleDragMode(self):\n if self.dragMode() == QtWidgets.QGraphicsView.ScrollHandDrag:\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag)\n elif not self._photo.pixmap().isNull():\n self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)\n\n def mousePressEvent(self, event):\n if self._photo.isUnderMouse():\n self.photoClicked.emit(self.mapToScene(event.pos()).toPoint())\n super(PhotoViewer, self).mousePressEvent(event)\n\n","repo_name":"CBIIT/HiTIPS","sub_path":"build/lib/hitips/DisplayGUI.py","file_name":"DisplayGUI.py","file_ext":"py","file_size_in_byte":16112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16036232740","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nformula = (\n r'$'\n r'N = \\int_{E_\\mathrm{min}}^{E_\\mathrm{max}} '\n r'\\int_0^A'\n r'\\int_{t_\\mathrm{min}}^{t_\\mathrm{max}} '\n r'\\Phi_0 \\left(\\frac{E}{1\\,\\mathrm{GeV}}\\right)^{\\!\\!-\\gamma}'\n r' \\, \\mathrm{d}A \\, \\mathrm{d}t \\, \\mathrm{d}E'\n r'$'\n)\n\n\ndef power_law_spectrum(energy, normalisation, spectral_index):\n return normalisation * energy**(-spectral_index)\n\n\nbin_edges = np.logspace(2, 5, 15)\nbin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])\n\ny = power_law_spectrum(bin_centers, 1e-5, 2.5)\nrelative_error = np.random.normal(1, 0.2, size=len(y))\ny_with_err = relative_error * y\n\nfig, ax = plt.subplots()\nax.errorbar(\n np.log10(bin_centers),\n y_with_err,\n xerr=[\n np.log10(bin_centers) - np.log10(bin_edges[:-1]),\n np.log10(bin_edges[1:]) - np.log10(bin_centers)\n ],\n yerr=0.5 * y_with_err,\n linestyle='',\n)\n\nax.set_xlabel(r'$\\log_{10}(E \\,\\,/ \\,\\, \\mathrm{GeV})$')\nax.set_ylabel(\n r'$\\Phi_0'\n r'\\,\\,/\\,\\,'\n r'(\\mathrm{GeV}^{-1} \\, \\mathrm{s}^{-1} \\, \\mathrm{sr}^{-1} \\mathrm{m}^{-2})$'\n)\n\nax.text(0.1, 0.1, formula, transform=ax.transAxes)\nax.set_yscale('log')\n\nfig.tight_layout(pad=0)\nfig.savefig('build/before.pdf')\n","repo_name":"escape2020/school2021","sub_path":"matplotlib-publication-quality/plot_before.py","file_name":"plot_before.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":246,"dataset":"github-code","pt":"81"} +{"seq_id":"1825229052","text":"import json\r\nfrom .base import SpiderPlugin\r\n\r\n\r\nclass TestPlugin(SpiderPlugin):\r\n def desc(self):\r\n return desc()\r\n\r\n def execute(self, parameters):\r\n return execute(parameters)\r\n\r\n def validate(self, parameters):\r\n try:\r\n enabled = get_bool(parameters.get('ENABLED', True))\r\n except ValueError:\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef 
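# The count integral annotated on the plot above has a closed form when the
# effective area A and livetime T are constant and gamma != 1:
#   N = Phi0 * A * T * (E_max**(1-gamma) - E_min**(1-gamma)) / (1 - gamma)
# A small sanity-check sketch (the numbers are illustrative, not from the file):

def expected_counts(phi0, gamma, e_min, e_max, area, livetime):
    return phi0 * area * livetime * (
        e_max ** (1 - gamma) - e_min ** (1 - gamma)) / (1 - gamma)

print(expected_counts(1e-5, 2.5, 1e2, 1e5, 1.0, 1.0))  # roughly 6.7e-9 events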
get_bool(value):\r\n try:\r\n return bool(int(value))\r\n except ValueError:\r\n if value in (\"True\", \"true\"):\r\n return True\r\n if value in (\"False\", \"false\"):\r\n return False\r\n raise ValueError(\"Supported values for boolean settings \"\r\n \"are 0/1, True/False, '0'/'1', \"\r\n \"'True'/'False' and 'true'/'false'\")\r\n\r\n\r\ndef desc():\r\n return json.dumps({\r\n 'name': 'test_plugin',\r\n 'parameters': {\r\n 'ENABLED': {\r\n 'type': 'bool',\r\n 'required': True,\r\n 'default_value': True\r\n }\r\n }\r\n })\r\n\r\n\r\nTEMPLATE = '''\r\ntry: SPIDER_MIDDLEWARES\r\nexcept NameError: SPIDER_MIDDLEWARES = {}\r\nSPIDER_MIDDLEWARES['scrapy_splitvariants.SplitVariantsMiddleware']= 100\r\n\r\nSPLITVARIANTS_ENABLED = %(enabled)s\r\n'''\r\n\r\n\r\ndef execute(settings):\r\n enabled = get_bool(settings.get('ENABLED', True))\r\n return TEMPLATE % {'enabled': enabled}\r\n","repo_name":"kevenli/scrapydd","sub_path":"tests/test_plugin/test_plugin/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"21819189358","text":"def subset(data):\r\n for i in range(len(data)+1):\r\n yield from comb(data, i, 0, [])\r\n\r\ndef comb(data, r, start, progress):\r\n if r == 0:\r\n yield progress\r\n return\r\n\r\n for i in range(start, len(data)):\r\n yield from comb(data, r - 1, i + 1, progress + [data[i]])\r\n \r\nfor e in subset('ABCD'):\r\n print(e)","repo_name":"kmzn128/study","sub_path":"iter/subset.py","file_name":"subset.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72353593864","text":"from collections import defaultdict\nfrom Transistor import Transistor\n\n\n# Holds everything about netlist, every transistor inside it\nclass Netlist:\n def __init__(self, filename):\n self.filename = filename\n self.mosArray = {}\n self.mosTypeArray = [\"\", \"\"] # First NMOS then PMOS\n self.in_ports = []\n self.out_ports = []\n self.nets = defaultdict(set) # Dictionary\n\n def find_mos_type(self):\n with open(self.filename) as f:\n for line in f:\n tokenized = line.rstrip('\\n').split(\" \")\n if tokenized[0] == \".model\":\n if tokenized[2] == \"NMOS\":\n self.mosTypeArray[0] = tokenized[1]\n else:\n self.mosTypeArray[1] = tokenized[1]\n\n def print_netlist(self):\n print('|{:>8}'.format(\"Instance name\"), end=\" \")\n print('|{:>8}'.format(\"Mos type\"), end=\" \")\n print('|{:>8}'.format(\"Width\"), end=\" \")\n print('|{:>8}'.format(\"Length\"), end=\" \")\n print('|{:>8}'.format(\"Gate node\"), end=\" \")\n print('|{:>8}'.format(\"Source node\"), end=\" \")\n print('|{:>8}'.format(\"Drain node\"), end=\" \")\n print('|{:>8}'.format(\"Bulk node\"), end=\" \")\n print('|{:>8}'.format(\"Model\"), end=\" \")\n print('|{:>8}'.format(\"Pair (if any)\"), end=\"\\n\")\n for mos in self.mosArray:\n self.mosArray[mos].print_transistor()\n\n # Parses a SPICE netlist\n def parse(self):\n self.find_mos_type()\n with open(self.filename) as f:\n for line in f:\n tokenized = line.rstrip('\\n').split(\" \")\n if len(tokenized) == 8:\n if tokenized[5] == self.mosTypeArray[0]:\n typename = \"NMOS\"\n elif tokenized[5] == self.mosTypeArray[1]:\n typename = \"PMOS\"\n mos = Transistor(name=tokenized[0], drain=tokenized[1], gate=tokenized[2],\n source=tokenized[3], bulk=tokenized[4], mos_model=tokenized[5],\n length=tokenized[6], width=tokenized[7], typename=typename)\n 
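# subset() above enumerates the power set by chaining k-combinations for every
# k; the standard library expresses the same thing directly. A sketch of the
# equivalence (itertools is part of the standard library):

from itertools import chain, combinations

def powerset(data):
    return chain.from_iterable(
        combinations(data, r) for r in range(len(data) + 1))

# list(powerset('ABCD')) yields the same 16 subsets as subset('ABCD'), in the
# same size-then-lexicographic order, as tuples rather than lists.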
self.nets[mos.drain].add(mos.instanceName + 'D')\n self.nets[mos.gate].add(mos.instanceName + 'G')\n self.nets[mos.source].add(mos.instanceName + 'S')\n self.nets[mos.bulk].add(mos.instanceName + 'B')\n self.mosArray[mos.instanceName] = mos\n elif tokenized[0] == \"*>>\": # External commands\n cmd = tokenized[1]\n if cmd == \"pairs\":\n self.mosArray[tokenized[2]].pair = self.mosArray[tokenized[3]]\n self.mosArray[tokenized[3]].pair = self.mosArray[tokenized[2]]\n elif cmd == \"port\":\n if tokenized[2] == \"input\":\n self.in_ports.append(tokenized[3])\n else:\n self.out_ports.append(tokenized[3])\n","repo_name":"fatihgulakar/spice_netlist_visualizer","sub_path":"Netlist.py","file_name":"Netlist.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38822300065","text":"# Reprensentation of a node in an avl tree\n# fields: key, left_child, right_child. height\nclass Node():\n def __init__(self, key):\n self.key = key\n self.left_child = None\n self.right_child = None\n self.height = 1\n\n# Provides methods for avl tree managment\nclass AVL_Tree():\n \n # Perform a left rotation around x as pivot point\n # Return the new root node of the subtree\n def left_rot(self, x: Node) -> Node:\n y = x.right_child\n T2 = y.left_child\n y.left_child = x\n x.right_child = T2\n\n # recalculate heights\n x.height = 1 + max(self.getHeight(x.left_child),\n self.getHeight(x.right_child))\n y.height = 1 + max(self.getHeight(y.left_child),\n self.getHeight(y.right_child))\n \n return y\n\n # Perfom a right rotation around x as pivot point\n # Return the new root node of the subtree\n def right_rot(self, x):\n y = x.left_child\n T3 = y.right_child\n y.right_child = x\n x.left_child = T3\n\n # recalculate heights\n x.height = 1 + max(self.getHeight(x.left_child),\n self.getHeight(x.right_child))\n y.height = 1 + max(self.getHeight(y.left_child),\n self.getHeight(y.right_child))\n return y\n \n # calculates and returns the balance factor of the tree\n def balance(self, root) -> int:\n if not root:\n return 0\n return self.getHeight(root.left_child) - self.getHeight(root.right_child)\n\n # inserts a new Node into the tree if the key is not already in the tree\n def insert(self, root, key): \n if not root:\n # The key is not in the tree -> add a new node to the tree\n print(\"ins true\")\n return Node(key)\n elif key == root.key:\n #The key was found in the tree -> dont add the node\n print(\"ins false\")\n pass\n elif key < root.key:\n # if the key is less than the key of the current node insert the new node at the left child\n root.left_child = self.insert(root.left_child, key)\n else:\n # ifthe key is greater than the key of the current node insert the new node at the right child\n root.right_child = self.insert(root.right_child, key)\n\n # Reset the height of the node of the current recusion depth\n root.height = 1 + max(self.getHeight(root.left_child),\n self.getHeight(root.right_child))\n\n # rebalance the tree\n balanceFactor = self.balance(root)\n if balanceFactor > 1:\n if key < root.left_child.key:\n return self.right_rot(root)\n else:\n root.left_child = self.left_rot(root.left_child)\n return self.right_rot(root)\n\n if balanceFactor < -1:\n if key > root.right_child.key:\n return self.left_rot(root)\n else:\n root.right_child = self.right_rot(root.right_child)\n return self.left_rot(root)\n\n return root\n\n # Delete a node from the tree if the key is in the tree\n def delete(self, root, key):\n if not 
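# Usage sketch for the parser above. The 8-token transistor lines follow the
# SPICE field order consumed by parse(): name drain gate source bulk model
# length width. The file name and netlist contents here are hypothetical, and
# print_netlist() assumes Transistor.print_transistor() from the companion module.

with open('inverter.sp', 'w') as f:
    f.write('.model nch NMOS\n')
    f.write('.model pch PMOS\n')
    f.write('M1 out in 0 0 nch 0.18u 1u\n')
    f.write('M2 out in vdd vdd pch 0.18u 2u\n')
    f.write('*>> pairs M1 M2\n')

netlist = Netlist('inverter.sp')
netlist.parse()
netlist.print_netlist()   # tabulates both devices, with M1/M2 marked as a pair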
root:\n # The key wasnt found in the tree\n print(\"del false\")\n return root\n elif key < root.key:\n # recursive searching in the left subtree\n root.left_child = self.delete(root.left_child, key)\n elif key > root.key:\n # recursive searching in the right subtree\n root.right_child = self.delete(root.right_child, key)\n else:\n # Key was found in the tree -> delete the node \n if root.left_child is None:\n # Delete the right node if the left node was none\n print(\"del true\")\n tmp = root.right_child\n root = None\n return tmp\n elif root.right_child is None:\n # Delete the left nide if the right node was none\n print(\"del true\")\n tmp = root.left_child\n root = None\n return tmp\n tmp = self.getMinValueNode(root.right_child)\n root.key = tmp.key\n root.right_child = self.delete(root.right_child, tmp.key)\n \n # recursion termination\n if root is None:\n return root\n \n # recalculate the height\n root.height = 1 + max(self.getHeight(root.left_child),\n self.getHeight(root.right_child))\n\n # reballance the tree\n balance = self.balance(root)\n if balance > 1: \n if self.balance(root.left_child) >= 0:\n return self.right_rot(root)\n else:\n root.left = self.left_rot(root.left_child)\n return self.right_rot(root)\n if balance < -1:\n if self.balance(root.right_child) <= 0:\n return self.left_rot(root)\n else:\n root.right_child = self.right_rot(root.right_child)\n return self.left_rot(root)\n return root\n \n\n # search a key in the tree\n def search(self, root, key):\n if not root:\n # the key was not found in the tree\n print(\"search false\")\n return root\n if key == root.key:\n # found the key in the tree\n print(\"search true\")\n return root\n elif key < root.key:\n # search in the left subtree from node as root\n self.search(root.left_child, key)\n elif key > root.key:\n # search in the right subtree from node as root\n self.search(root.right_child, key)\n \n # recursion termination\n if root is None:\n return root\n \n # recalculate height\n root.height = 1 + max(self.getHeight(root.left_child),\n self.getHeight(root.right_child))\n\n return root\n \n # returns the height of a given node\n # return 0 if the node is none\n def getHeight(self, root):\n if not root:\n return 0\n return root.height\n \n # returns the node with the smallest key in the subtree from root\n def getMinValueNode(self, root):\n if root is None or root.left_child is None:\n return root\n return self.getMinValueNode(root.left_child)","repo_name":"LarsBuecker/DuA_Prak_02","sub_path":"avlTree.py","file_name":"avlTree.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27728422560","text":"'''\nAPS-2020\nProblem Description, Input, Output : https://www.codechef.com/COOK119B/problems/CACHEHIT\nCode by : Sagar Kulkarni\n'''\n\nfor _ in range(int(input())):\n n,b,m=map(int,input().split())\n list1=[int(x) for x in input().split()]\n\n blockLoad=1\n fixedVal=list1[0]%b\n\n val1=fixedVal\n val1-=1\n tempList1=[]\n tempInsert1=list1[0]\n while val1>=0:\n tempInsert1-=1\n tempList1.append(tempInsert1)\n val1-=1\n tempList1.reverse()\n tempList1.append(list1[0])\n\n val2=fixedVal\n val2+=1\n tempList2=[]\n tempInsert2=list1[0]\n while val2=0:\n tempInsert1-=1\n tempList1.append(tempInsert1)\n val1-=1\n tempList1.reverse()\n tempList1.append(list1[i])\n\n val2=fixedVal\n val2+=1\n tempList2=[]\n tempInsert2=list1[i]\n while val2 hill_max:\n hill_max = p.value(hill_solution)\n hill_x = hill_solution\n\n # Solve the 
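# Usage sketch for the AVL_Tree above (not in the original file). insert() and
# delete() return the possibly-new subtree root after rotations, so the caller
# must reassign it on every call:

tree = AVL_Tree()
root = None
for key in [10, 20, 30, 40, 50, 25]:
    root = tree.insert(root, key)    # 10,20,30 alone would left-rotate at 10

tree.search(root, 25)                # prints "search true"
root = tree.delete(root, 40)         # prints "del true" and rebalances
print(root.key, tree.getHeight(root))  # height stays O(log n) after updates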
problem using simulated annealing.\n # annealing_time = time.time()\n annealing_solution = simulated_annealing(\n p,\n exp_schedule(k=20, lam=0.005, limit=1000)\n )\n annealing_avg += annealing_solution\n\n # print (\"Simulated annealing took --- %s seconds ---\" % (time.time()-start_time))\n if p.value(annealing_solution) > annealing_max:\n annealing_max = p.value(annealing_solution)\n annealing_x = annealing_solution\n\n print('Both algorithms randomly restarted 20 times and the biggest and average solution values were recorded.')\n print('Hill-climbing best solution x: ' + str(hill_x)\n + '\\tvalue: ' + str(hill_max)\n )\n print('Hill-climbing average solution :' + str(hill_avg / 10.0)\n )\n\n print('Simulated annealing best solution x: ' + str(annealing_x)\n + '\\tvalue: ' + str(annealing_max)\n )\n print('Simulated annealing average solution :' + str(annealing_avg / 10.0)\n )","repo_name":"zchen0925/Artificial-Intelligence-course-projects","sub_path":"lab02/sine.py","file_name":"sine.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72849619464","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport sys \n#print (sys.argv) useful in de-bugging\nimport csv\n\nChromo = set()\nchromopairs= set()\n\nwith open(sys.argv[1]) as file1:\n for row in csv.reader(file1, delimiter= '\\t'): #for row in fi:\n Chromo.add(row[0].strip())\n\n #return Chromo \n#print (Chromo) #\n \nwith open(sys.argv[2]) as file2:\n for row in csv.reader(file2, delimiter='\\t'):\n if row[0].startswith(\"#\"):\n continue \n \t#could I do a sed before csv reader and convert . to \n attributes=row[8]\n d={} # Here means will make new dictionary for each line\n for item in attributes.split(';'):\n if item: \n pair=(item.split('=')) \n d[pair[0]] = pair[1] \n Gid=d['ID'].split('.')[0] \n \n if Gid in Chromo:\n chromopairs.add(row[0]+'\\t'+Gid)\n \nfor pair in sorted(chromopairs):\n print (pair)\n #colon only needed when cahnge into a different block include def and class","repo_name":"erin-baggs/Script_Drafts","sub_path":"Chromsome_co-ord.py","file_name":"Chromsome_co-ord.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74794589065","text":"class LRUCache:\n\n def __init__(self, capacity: int):\n self.dict = {}\n self.capacity = capacity\n self.queue = collections.deque()\n\n def get(self, key: int) -> int:\n ret = self.dict.get(key, -1)\n if ret == -1:\n return -1\n else:\n self.queue.remove(key) # O(n)\n self.queue.appendleft(key)\n # print(self.queue)\n return ret\n\n def put(self, key: int, value: int) -> None:\n if key not in self.dict:\n self.dict[key] = value\n \n \n if len(self.queue) == self.capacity:\n tmp = self.queue.pop()\n del self.dict[tmp]\n\n self.queue.appendleft(key)\n else:\n self.dict[key] = value\n self.queue.remove(key) # O(n)\n self.queue.appendleft(key)\n # print(self.queue)\n\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)\n","repo_name":"novayo/LeetCode","sub_path":"0146_LRU_Cache/try_1.py","file_name":"try_1.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"7621417778","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom 
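# The deque-based LRUCache above pays O(n) for every queue.remove(key), as its
# own comments note. A common O(1)-per-operation alternative (not the original
# author's code) keeps the recency order inside an OrderedDict:

from collections import OrderedDict

class LRUCacheO1:
    def __init__(self, capacity: int):
        self.data = OrderedDict()
        self.capacity = capacity

    def get(self, key: int) -> int:
        if key not in self.data:
            return -1
        self.data.move_to_end(key)           # mark as most recently used
        return self.data[key]

    def put(self, key: int, value: int) -> None:
        if key in self.data:
            self.data.move_to_end(key)
        self.data[key] = value
        if len(self.data) > self.capacity:
            self.data.popitem(last=False)    # evict the least recently used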
torch.optim import Adam\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as T\nfrom datasets import MNISTM, SVHN, USPS\nfrom models import GTA\n\ndef main():\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n transform = T.Compose([\n T.ToPILImage(),\n T.Resize(32),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n ])\n mnistm_dataset = MNISTM(image_path=\"./hw3_data/digits/mnistm/train\", label_path=\"./hw3_data/digits/mnistm/train.csv\", transform=transform)\n svhn_dataset = SVHN(image_path=\"./hw3_data/digits/svhn/train\", label_path=\"./hw3_data/digits/svhn/train.csv\", transform=transform)\n usps_dataset = USPS(image_path=\"./hw3_data/digits/usps/train\", label_path=\"./hw3_data/digits/usps/train.csv\", transform=transform)\n\n batch_size = 64\n mnistm_loader = DataLoader(mnistm_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n svhn_loader = DataLoader(svhn_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n usps_loader = DataLoader(usps_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n \n source_loaders = [usps_loader, svhn_loader]\n target_loaders = [mnistm_loader, usps_loader]\n\n model = GTA(latent_dim=512, batch_size=batch_size, device=device)\n model.move_to_device()\n\n EPOCH = 100\n\n paths = [\"usps-mnistm\", \"svhn-usps\"] \n\n for i in range(len(source_loaders)):\n if not os.path.exists(paths[i]):\n os.makedirs(paths[i])\n os.makedirs(os.path.join(paths[i], \"C\"))\n os.makedirs(os.path.join(paths[i], \"F\"))\n\n steps = min(len(source_loaders[i]), len(target_loaders[i]))\n model.reset_all_weights()\n \n for epoch in range(EPOCH):\n \n D_loss = 0.0\n G_loss = 0.0\n C_loss = 0.0\n F_loss = 0.0\n\n source_data = iter(source_loaders[i])\n target_data = iter(target_loaders[i])\n \n for step in range(steps):\n # Source data\n source_inputs, source_labels = next(source_data)\n \n source_labels_onehot = torch.zeros((source_inputs.size(0), 10), dtype=torch.float)\n for j in range(source_inputs.size(0)):\n source_labels_onehot[j][source_labels[j]] = 1\n\n source_inputs, source_labels, source_labels_onehot = source_inputs.to(device), source_labels.to(device), source_labels_onehot.to(device)\n \n # Target data\n target_inputs, _ = next(target_data)\n target_inputs = target_inputs.to(device)\n \n\n d_loss = model.train_D(source_inputs, source_labels, source_labels_onehot, target_inputs)\n g_loss = model.train_G(source_labels)\n c_loss = model.train_C(source_inputs, source_labels)\n f_loss = model.train_F(source_labels)\n\n print(\"\\rTask: [{}/3], Epoch: [{}/{}], Step: [{}/{}], D_loss: {:.5f}, G_loss: {:.5f}, C_loss: {:.5f}, F_loss: {:.5f}\".format(i+1, epoch+1, EPOCH, step+1, steps, d_loss, g_loss, c_loss, f_loss), end=\"\")\n\n D_loss += d_loss\n G_loss += g_loss\n C_loss += c_loss\n F_loss += f_loss\n \n print(\"\")\n\n print(\"\\n\\nEpoch {}: D_loss = {:.5f}, G_loss = {:.5f}, C_loss = {:.5f}, F_loss = {:.5f}\".format(epoch+1, D_loss/steps, G_loss/steps, C_loss/steps, F_loss/steps))\n \n model.save_C(os.path.join(paths[i], \"C\", \"{}.pkl\".format(str(epoch+1).zfill(2))))\n model.save_F(os.path.join(paths[i], \"F\", \"{}.pkl\".format(str(epoch+1).zfill(2))))\n\nif __name__ == \"__main__\":\n 
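# The per-sample loop above that fills source_labels_onehot can be vectorized;
# a sketch using torch.nn.functional.one_hot (available in PyTorch >= 1.1,
# which this training script appears to assume anyway):

import torch
import torch.nn.functional as F

labels = torch.tensor([3, 0, 9])
onehot = F.one_hot(labels, num_classes=10).float()
# equivalent to zeroing a (N, 10) tensor and setting onehot[j][labels[j]] = 1
assert onehot.shape == (3, 10) and onehot[0, 3] == 1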
main()","repo_name":"hhccode/Deep-Learning-for-Computer-Vision","sub_path":"dlcv_hw3/train/gta_train.py","file_name":"gta_train.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42731800727","text":"import json\nimport base64\n\n\nclass CLMConnection(object):\n def _encode_b64(self, s):\n # Convert into byte type\n s_byte = s.encode(\"utf-8\")\n # encode into base64 (in byte type)\n s_b64 = base64.urlsafe_b64encode(s_byte)\n # return into str type\n return s_b64.decode('utf-8')\n\n def _remove_final_slash(self, url):\n if url.endswith('/'):\n return url[:-1]\n else:\n return url\n\n def __init__(self, username, password,\n api_nsp, api_clm, api_version, disable_proxy=False, proxy={},\n debug=False, force_auth=False):\n \n self.base_nsp_url = '%s/rest-gateway/rest/api/v%s/' % (self._remove_final_slash(api_nsp), api_version)\n self.base_clm_url = '%s/license-manager/rest/api/v%s/' % (self._remove_final_slash(api_clm), api_version)\n self.username = username\n # in byte\n auth_base64 = self._encode_b64(username + \":\" + password)\n self.headers = {\n 'Authorization': \"Basic %s\" % auth_base64,\n 'Content-Type': \"application/json\",\n }\n self.debug = debug\n self.force_auth = force_auth\n if disable_proxy:\n self.proxies = {\n \"http\": None,\n \"https\": None\n }\n else:\n if proxy:\n self.proxies = proxy\n else:\n self.proxies = None\n\n def _do_request(self, method, url, headers=None, params=None):\n import requests\n requests.packages.urllib3.disable_warnings()\n try:\n data = json.dumps(params) if params is not None else None\n if self.debug:\n print_headers = \"# Headers:\"\n for line in json.dumps(headers, indent=4).split('\\n'):\n print_headers += '\\n# %s' % line\n print('#####################################################')\n print('# Request')\n print('# Method: %s' % method)\n print('# URL: %s' % url)\n print(print_headers)\n print(\"# Parameters: %s\" % data)\n print('#####################################################')\n response = requests.request(method, url, headers=headers,\n verify=False, timeout=10, data=data,\n proxies=self.proxies)\n except requests.exceptions.RequestException as error:\n print('Error: Unable to connect.')\n print('Detail: %s' % error)\n raise SystemExit(1)\n if self.debug:\n print_headers = \"# Headers:\"\n for line in json.dumps(dict(response.headers),\n indent=4).split('\\n'):\n print_headers += '\\n# %s' % line\n print('# Response')\n print('# Status code: %s' % response)\n print(print_headers)\n print('# Body: %s' % response.text)\n print('#####################################################')\n print('')\n return response\n\n def _response(self, resp):\n if resp.status_code == 401:\n print('Error: Athentication failed. 
'\n 'Please verify your credentials.')\n raise SystemExit(1)\n if resp.status_code < 200 or resp.status_code >= 300:\n try:\n print('Error: %s' % resp.json()['errors'][0]\n ['descriptions'][0]['description'])\n raise SystemExit(1)\n except ValueError:\n print('Unknown Error: CLM returns\\n%s' % resp.text)\n raise SystemExit(1)\n if resp.text == '':\n return []\n return resp.json()\n\n def remove_extra_slash_url(func):\n def wrapper(self, url, *args, **kwargs):\n if url.startswith('/'):\n return func(self, url[1:], *args, **kwargs)\n else:\n return func(self, url, *args, **kwargs)\n return wrapper\n\n @remove_extra_slash_url\n def get(self, url, filter=None, headers={}):\n self.authenticate()\n h = self.headers.copy()\n h.update(headers)\n\n r = self._do_request('GET', self.base_clm_url + url,\n headers=h)\n return self._response(r)\n\n @remove_extra_slash_url\n def post(self, url, params, headers={}):\n self.authenticate()\n h = self.headers.copy()\n h.update(headers)\n r = self._do_request('POST', self.base_clm_url + url,\n headers=h, params=params)\n return self._response(r)\n\n @remove_extra_slash_url\n def put(self, url, params, headers={}):\n self.authenticate()\n h = self.headers.copy()\n h.update(headers)\n r = self._do_request('PUT', self.base_clm_url + url,\n headers=h, params=params)\n return self._response(r)\n\n @remove_extra_slash_url\n def delete(self, url):\n self.authenticate()\n r = self._do_request('DELETE', self.base_clm_url + url,\n headers=self.headers)\n return self._response(r)\n\n def authenticate(self):\n import os\n import time\n\n data_dir = '%s/.clm' % os.path.expanduser(\"~\")\n Token_file = data_dir + '/NSPToken'\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n do_auth = False\n if os.path.exists(Token_file) and not self.force_auth:\n with open(Token_file) as data_file:\n token_session = json.load(data_file)\n # replay auth if expire\n if int(token_session['token_creation']) + token_session['expires_in'] < int(time.time()):\n do_auth = True\n\n else:\n do_auth = True\n\n if do_auth:\n body = { \"grant_type\": \"client_credentials\"}\n r = self._do_request('POST', self.base_nsp_url + 'auth/token',\n headers=self.headers, params=body)\n rjson = self._response(r)\n self.headers['Authorization'] = \"Bearer %s\" % rjson['access_token']\n token_session = {'access_token': rjson['access_token'],\n 'refresh_token': rjson['refresh_token'],\n 'expires_in': rjson['expires_in'],\n 'token_creation': time.time()}\n with open(Token_file, 'w') as data_file:\n json.dump(token_session, data_file)\n else:\n self.headers['Authorization'] = \"Bearer %s\" % token_session['access_token']\n\n","repo_name":"maxiterr/openclm","sub_path":"open_clmcli/clm_client.py","file_name":"clm_client.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40946617369","text":"from abc import abstractmethod\nfrom enum import Enum\n\nfrom pyschism.forcing.bctides.bctypes import Bctype\nfrom pyschism.forcing import hycom\n\n\nclass Isatype(Bctype):\n @property\n @abstractmethod\n def isatype(self) -> int:\n pass\n\n\nclass UniformTimeHistorySalinity(Isatype):\n def __init__(self, time_history):\n raise NotImplementedError(f\"{self.__class__.__name__}\")\n self.time_history = time_history\n\n @property\n def isatype(self) -> int:\n return 1\n\n\nclass ConstantSalinity(Isatype):\n def __init__(self, value):\n raise NotImplementedError(f\"{self.__class__.__name__}\")\n self.value = value\n\n 
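# The Authorization header assembled above is standard HTTP Basic auth; a
# quick standalone check of the encoding helper (illustrative credentials):

import base64

token = base64.urlsafe_b64encode(b'admin:secret').decode('utf-8')
assert token == 'YWRtaW46c2VjcmV0'   # header becomes "Basic YWRtaW46c2VjcmV0"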
@property\n def isatype(self) -> int:\n return 2\n\n\nclass SalinityInitialConditions(Isatype):\n def __init__(self):\n raise NotImplementedError(f\"{self.__class__.__name__}\")\n\n @property\n def isatype(self):\n return 3\n\n\nclass SpatiallyVaryingTimeHistorySalinity(Isatype):\n class BaroclinicDatabases(Enum):\n RTOFS = hycom.RTOFS\n GOFS = hycom.GOFS\n\n def __init__(\n self,\n data_source=\"gofs\",\n nudge: bool = True,\n rlmax=1.5,\n rnu_day=0.25,\n ):\n if isinstance(data_source, str):\n data_source = self.BaroclinicDatabases[data_source.upper()].value()\n if not isinstance(data_source, hycom.Hycom):\n raise TypeError(\n \"Argument data_source must be of type str or type \"\n f\"{type(hycom.Hycom)}, not type {type(data_source)}.\"\n )\n self.data_source = data_source\n self.data_component = data_source.salinity\n self.nudge = nudge\n self.rlmax = rlmax\n self.rnu_day = rnu_day\n\n def get_boundary_string(self, *args, **kwargs):\n return \"1.\"\n\n @property\n def isatype(self):\n return 4\n\n\nIsatype1 = UniformTimeHistorySalinity\nIsatype2 = ConstantSalinity\nIsatype3 = SalinityInitialConditions\nIsatype4 = SpatiallyVaryingTimeHistorySalinity\n","repo_name":"TrellixVulnTeam/pyschism-sciclone-tests_5BBQ","sub_path":"pyschism/pyschism/forcing/bctides/isatype.py","file_name":"isatype.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28371908311","text":"documents = [\n {\"type\": \"passport\", \"number\": \"2207 876234\", \"name\": \"Василий Гупкин\"},\n {\"type\": \"invoice\", \"number\": \"11-2\", \"name\": \"Геннадий Покемонов\"},\n {\"type\": \"insurance\", \"number\": \"10006\", \"name\": \"Аристарх Павлов\"}\n ]\n \ndirectories = {\n '1': ['2207 876234', '11-2'],\n '2': ['10006'],\n '3': []\n }\n\n\n\ndef get_people():\n print ('Введите номер документа')\n number = input()\n for item in documents:\n if (number == item['number']):\n print (item['name'])\n return\n # else: \n # print ('Нет такого пользователя')\n\n# p = get_people()\n\n\ndef get_list():\n for item in documents:\n print (\"\\\"\" + item['type'] + \"\\\" \" + \"\\\"\" + item['number'] + \"\\\"\" + \" \" + \"\\\"\"+ item['name'] + \"\\\"\") \n \n# l = get_list()\n\ndef get_shelf():\n print ('Введите номер документа')\n number = input()\n for key, value in directories.items():\n # print (value)\n # print('{0} {1}'. format(key, value))\n if (item in value):\n print (key)\n \n# s = get_shelf()\n\ndef get_add():\n print ('Введите номер документа')\n number = input()\n print ('Введите тип документа')\n document = input()\n print ('Введите имя владельца')\n name = input()\n print ('Введите номер полки')\n direct = input()\n documents.append( {\"type\":document, \"number\": number, \"name\": name}) \n print (\"Данные занесены\")\n for key, value in directories.items():\n if (key == direct):\n value.append(number)\n print('{0} {1}'. 
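# The string-to-datasource dispatch above works because Enum members can be
# looked up by name. A standalone illustration of the pattern, with
# placeholder classes standing in for the hycom ones (hypothetical names):

from enum import Enum

class GOFS: ...
class RTOFS: ...

class BaroclinicDatabases(Enum):
    RTOFS = RTOFS
    GOFS = GOFS

source = BaroclinicDatabases['gofs'.upper()].value()   # instantiates GOFS
assert isinstance(source, GOFS)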
format(key, value))\n return\n\nwhile (input('Введите символ и нажмите enter для запуска программы или нажмите enter для выхода из нее') != \"\"):\n # a = get_add()\n print (\"Введите команду p, l , s , a\")\n user = input()\n if (user == 'a'):\n a = get_add()\n elif (user == 's'):\n s = get_shelf()\n elif (user == 'l'):\n l = get_list()\n elif (user == 'p'):\n p = get_people()","repo_name":"kmv1712/Netology","sub_path":"Python_Homework/lesson_1.4/lesson_1.4.py","file_name":"lesson_1.4.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19755569221","text":"n = int(input())\n\npackVal = 1\ncountVal = 0\n\nfor i in range(1, n+1):\n packVal *= i\n\nfor item in str(packVal)[::-1]:\n if item == \"0\": countVal += 1\n else:\n print(countVal)\n break","repo_name":"kimjunsung04/baekjoon","sub_path":"06 Silver5/1676.py","file_name":"1676.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71219562505","text":"import configparser\nimport RPi.GPIO as GPIO\nimport time\n\ndef readConfig():\n try:\n config_object = configparser.ConfigParser()\n config_object.read(\"config-led.conf\")\n if config_object.has_section(\"config-for-led\"):\n pass\n else:\n raise IOError\n return config_object[\"config-for-led\"]\n except IOError:\n config={\n \"ledLimitedValue\": 0.0,\n \"ledFrequency\": 0.0\n }\n print(\"Led config not found\")\n return config\n\ndef rc_time (pin_to_circuit):\n count = 0\n GPIO.setup(pin_to_circuit, GPIO.OUT)\n GPIO.output(pin_to_circuit, GPIO.LOW)\n time.sleep(0.1)\n GPIO.setup(pin_to_circuit, GPIO.IN)\n while (GPIO.input(pin_to_circuit) == GPIO.LOW):\n count += 1\n return count\n\ndef ledBlinking():\n config = readConfig()\n ledLimitedValue = config[\"ledLimitedValue\"]\n ledFrequency = config[\"ledFrequency\"]\n sleepTime=0.1*float(ledFrequency)\n GPIO.setmode(GPIO.BOARD)\n light_sensor_gpio_pin = 3\n led_gpio_pin = 16\n GPIO.setup(led_gpio_pin, GPIO.OUT, initial=GPIO.LOW)\n countTime = 0\n try:\n # Main loop\n print(float(ledLimitedValue) < float(rc_time(light_sensor_gpio_pin)))\n while True:\n if countTime > 15.0:\n print(\"Config read\")\n config = readConfig()\n ledLimitedValue = config[\"ledLimitedValue\"]\n ledFrequency = config[\"ledFrequency\"]\n countTime = 0\n sleepTime=0.1*float(ledFrequency)\n if float(ledLimitedValue) < float(rc_time(light_sensor_gpio_pin)):\n GPIO.output(led_gpio_pin, GPIO.HIGH) # Turn on\n time.sleep(sleepTime)\n GPIO.output(led_gpio_pin, GPIO.LOW) # Turn off\n time.sleep(sleepTime)\n countTime += 2 * sleepTime\n except KeyboardInterrupt:\n pass\n finally:\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n ledBlinking()\n","repo_name":"theDuro/IoT-Paltform","sub_path":"ledDriver.py","file_name":"ledDriver.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72264516426","text":"#!/usr/bin/python\nimport unittest\nimport string\nimport random\nfrom SimplePythonBloomFilter import BloomFilter\n\nclass TestBloomFilter(unittest.TestCase):\n\tdef test_basic_use_case( self ):\n\t\t\"\"\"Creates, adds and tests a BloomFilter.\"\"\"\n\t\tself.__bloom_helper( 3, 0.001 )\n\t\tself.__bloom_helper( 5, 0.001 )\n\t\tself.__bloom_helper( 30, 0.001 )\n\t\tself.__bloom_helper( 50, 0.001 )\n\t\tself.__bloom_helper( 300, 0.001 )\n\t\tself.__bloom_helper( 500, 0.001 
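# 1676.py above computes n! in full and then counts trailing zeros in its
# decimal string. The count can be obtained without the big factorial:
# trailing zeros of n! equal the number of factors of 5 in 1..n (Legendre's
# formula), since factors of 2 always outnumber factors of 5. A sketch:

def trailing_zeros(n: int) -> int:
    count = 0
    power = 5
    while power <= n:
        count += n // power   # multiples of 5, then 25, then 125, ...
        power *= 5
    return count

assert trailing_zeros(10) == 2   # 10! = 3628800
assert trailing_zeros(25) == 6   # 25 itself contributes two factors of 5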
)\n\t\tself.__bloom_helper( 3000, 0.001 )\n\t\tself.__bloom_helper( 5000, 0.001 )\n\n\tdef __bloom_helper(self, capacity, error_rate):\n\t\t# Helper function that initializes a filter, adds randomly generated elements\n\t\t# and asserts they are found in the filter. Also checks that the calculated\n\t\t# false positive rate returned by the filter (after rounding) matches the rate\n\t\t# used to initialize it.\n\t\tbfilter = BloomFilter( capacity, error_rate )\n\t\tfor _ in range( capacity ):\n\t\t\trstring = ''.join( random.choice( string.printable ) for _ in range( 10 ) )\n\t\t\tbfilter.add( rstring )\n\t\t\tself.assertTrue( rstring in bfilter )\n\t\tself.assertTrue( bfilter.count() == capacity )\n\n\t\tprecision = len( str( error_rate )[2:] )\n\t\trounded_fp_rate = round( bfilter.fp_rate(), precision )\n\t\tself.assertTrue( rounded_fp_rate == error_rate )\n\n\tdef test_calculate_space( self ):\n\t\t\"\"\"Test math against known values verified by Wolfram Alpha\"\"\"\n\t\tspace = BloomFilter.calculate_space( 1000, 0.05 )\n\t\tself.assertTrue( space == 6236 )\n\t\t\n\t\tspace = BloomFilter.calculate_space( 1000, 0.005 )\n\t\tself.assertTrue( space == 11028 )\n\n\t\tspace = BloomFilter.calculate_space( 1000, 0.001 )\n\t\tself.assertTrue( space == 14378 )\n\n\t\tspace = BloomFilter.calculate_space( 2000, 0.05 )\n\t\tself.assertTrue( space == 12471 )\n\n\t\tspace = BloomFilter.calculate_space( 2000, 0.005 )\n\t\tself.assertTrue( space == 22056 )\n\n\t\tspace = BloomFilter.calculate_space( 2000, 0.001 )\n\t\tself.assertTrue( space == 28756 )\n\n\t\tspace = BloomFilter.calculate_space( 3000, 0.05 )\n\t\tself.assertTrue( space == 18706 )\n\n\t\tspace = BloomFilter.calculate_space( 3000, 0.005 )\n\t\tself.assertTrue( space == 33084 )\n\t\t\n\t\tspace = BloomFilter.calculate_space( 3000, 0.001 )\n\t\tself.assertTrue( space == 43133 )\n\n\tdef test_calculate_hash_count( self ):\n\t\t\"\"\"Test math against known values verified by Wolfram Alpha.\"\"\"\n\t\thash_count = BloomFilter.calculate_hash_count( 1000, 10000 )\n\t\tself.assertTrue( hash_count == 7 )\n\t\t\n\t\thash_count = BloomFilter.calculate_hash_count( 1000, 20000 )\n\t\tself.assertTrue( hash_count == 14 )\n\t\t\n\t\thash_count = BloomFilter.calculate_hash_count( 1000, 30000 )\n\t\tself.assertTrue( hash_count == 21 )\n\t\t\n\t\thash_count = BloomFilter.calculate_hash_count( 2000, 10000 )\n\t\tself.assertTrue( hash_count == 3 )\n\t\t\n\t\thash_count = BloomFilter.calculate_hash_count( 2000, 20000 )\n\t\tself.assertTrue( hash_count == 7 )\n\t\t\n\t\thash_count = BloomFilter.calculate_hash_count( 2000, 30000 )\n\t\tself.assertTrue( hash_count == 10 )\n\t\t\n\t\thash_count = BloomFilter.calculate_hash_count( 3000, 10000 )\n\t\tself.assertTrue( hash_count == 2 )\n\t\t\n\t\thash_count = BloomFilter.calculate_hash_count( 3000, 20000 )\n\t\tself.assertTrue( hash_count == 5 )\n\t\t\n\t\thash_count = BloomFilter.calculate_hash_count( 3000, 30000 )\n\t\tself.assertTrue( hash_count == 7 )\n\n\tdef test_default_hashfn( self ):\n\t\t\"\"\"Test that hashfn returned is callable, that it can take two\n\t\targuments, and that the response can be passed to the function\n\t\tlong() without raising errors.\"\"\"\n\t\thashfn = BloomFilter.default_hashfn()\n\t\tself.assertTrue( callable( hashfn ) )\n\t\tself.assertTrue( long( hashfn( \"test\", 0) ) )\n\n\t\nif __name__ == 
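# The "known values" asserted in these tests follow from the standard Bloom
# filter formulas: m = -n*ln(p)/(ln 2)^2 bits and k = (m/n)*ln 2 hashes. A
# sketch reproducing them; the rounding modes (ceil for the bit count, round
# to nearest for the hash count) are inferred from the expected values, not
# read from the SimplePythonBloomFilter source:

import math

def space_bits(n, p):
    return math.ceil(-n * math.log(p) / (math.log(2) ** 2))

def hash_count(n, m):
    return round((m / n) * math.log(2))

assert space_bits(1000, 0.05) == 6236   # matches test_calculate_space
assert hash_count(2000, 10000) == 3     # matches test_calculate_hash_count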
'__main__':\n\tunittest.main()\n","repo_name":"Geraden07/SimplePythonBloomFilter","sub_path":"Tests.py","file_name":"Tests.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24395566449","text":"#!/usr/bin/python3\n\n\n\"\"\" Function to get the Integer as the input from the User and find the factors of that number \"\"\"\n\n\ndef fact_func():\n num = int(input(\"Enter integer:\"))\n print(num)\n fact_list =[]\n \n for i in range(1,num+1):\n if num % i == 0:\n fact_list.append(i)\n return fact_list\n\n\n\"\"\" Driver Function \"\"\"\n\n\nif __name__ == \"__main__\":\n pos = int(input(\"Enter position:\"))\n print(pos)\n new_list = fact_func()\n enum_list = list(enumerate(new_list,start=1))\n #print(enum_list)\n for i,val in enum_list:\n if i == pos:\n print(\"The value is\",val)\n else: \n continue\n","repo_name":"ManivannanGit/CodeBlock","sub_path":"FactorFinder.py","file_name":"FactorFinder.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18203990847","text":"import sys\nimport heapq\ninput = sys.stdin.readline\n\n# v: 선호도, c: 도수\nN, M, K = map(int, input().split())\nbeers = []\nfor _ in range(K):\n v, c = map(int, input().split())\n beers.append([v, c])\n\n# 도수가 낮은 순으로 정렬\nbeers.sort(key=lambda x: (x[1]))\npreference = 0\nheap = []\n\nfor beer in beers:\n preference += beer[0]\n heapq.heappush(heap, beer[0])\n\n # N병 만큼 마시면\n if len(heap) == N:\n if preference >= M:\n answer = beer[1] # 도수가 낮은 순으로 정렬 되어 있기에\n break\n else:\n # 선호도를 못채우면\n preference -= heapq.heappop(heap) # 선호도가 제일 낮은 맥주를 뺀다\n\nif len(heap) != N:\n print(-1)\nelse:\n print(answer)\n","repo_name":"heejun32/Algorithm","sub_path":"BaekJoon/17503_again.py","file_name":"17503_again.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30260624559","text":"import csv\nfrom nucleo.LinkedList import *\nfrom nucleo.Producto import Producto\nclass Memoria:\n def __init__(self):\n self.contenido = \"\"\n self.lista = self.obtenerLinkedList()\n\n def setMaximo(self, maximo):\n self.maximo = maximo\n return True\n\n def getMaximo(self):\n tamanio = self.lista.GetSize()\n if(tamanio ==0):\n return 0\n idProducto = self.lista.LinkedListSearchIndex(tamanio-1)\n obj = self.lista.getObjProducto(idProducto)\n maximo = obj.getIdProducto()\n return maximo+1\n\n def setLista(self, lista):\n self.lista = lista\n return True\n \n def getLista(self):\n lista = self.lista\n return lista\n\n def getContenidoLinkedList(self):\n lista = self.lista\n tamanio = lista.GetSize()\n contenido = \"\"\n if(tamanio ==0):\n return \"False\"\n for i in range(tamanio):\n if(i ==0):\n idProducto = self.lista.LinkedListSearchIndex(i)\n\n objProducto = lista.getObjProducto(idProducto)\n \n idProducto = str(objProducto.getIdProducto())\n nombre = objProducto.getNombreProducto()\n moneda = objProducto.getMoneda()\n costo = str(objProducto.getCosto())\n descripcion = objProducto.getDescripcion()\n contenido = \"%s,%s,%s,%s,%s\\n\" %(idProducto,nombre,moneda,costo,descripcion)\n else:\n idProducto = self.lista.LinkedListSearchIndex(i)\n\n objProducto = lista.getObjProducto(idProducto)\n \n idProducto = str(objProducto.getIdProducto())\n nombre = objProducto.getNombreProducto()\n moneda = objProducto.getMoneda()\n costo = str(objProducto.getCosto())\n 
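# The min-heap greedy in 17503_again.py above works because beers are scanned
# in increasing alcohol content: once N candidates are held, dropping the one
# with the lowest preference can only help. A tiny self-contained run of the
# same algorithm (the numbers are made up for illustration):

import heapq

def lowest_max_abv(N, M, beers):          # beers: list of (preference, abv)
    beers.sort(key=lambda b: b[1])        # increasing alcohol content
    total, heap = 0, []
    for pref, abv in beers:
        total += pref
        heapq.heappush(heap, pref)
        if len(heap) == N:
            if total >= M:
                return abv                # current beer has the largest abv
            total -= heapq.heappop(heap)  # drop the least-liked beer
    return -1

assert lowest_max_abv(2, 10, [(6, 3), (5, 1), (2, 2)]) == 3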
descripcion = objProducto.getDescripcion()\n contenido += \"%s,%s,%s,%s,%s\\n\" %(idProducto,nombre,moneda,costo,descripcion)\n \n return contenido\n\n def obtenerLinkedList(self):\n lista = LinkedList()\n contenido = self.getContenidoCsv().split(\"\\n\")\n if(len(contenido)>1):\n for i in range(len(contenido)-1):\n columna = contenido[i].split(\",\")\n idProducto = columna[0]\n nombreProducto = columna[1]\n moneda = columna[2]\n costo = columna[3]\n descripcion = columna[4]\n\n objProducto = Producto()\n objProducto.setIdProducto(int(columna[0]))\n objProducto.setNombreProducto(columna[1])\n objProducto.setMoneda(columna[2])\n objProducto.setCosto(float(columna[3]))\n objProducto.setDescripcion(columna[4])\n\n lista.LinkedListAdd(objProducto)\n self.lista = lista\n return lista\n \n \n \n\n\n\n \n def generarCsv(self):\n contenido = self.getContenidoLinkedList()\n if(isinstance(contenido,bool)):\n return False\n\n f= open('nucleo/Memoria/CSV.csv','w')\n f.write(contenido)\n f.close()\n return True\n\n def getContenidoCsv(self):\n f = open(\"nucleo/Memoria/CSV.csv\", \"r\")\n contenido =f.read()\n f.close()\n return contenido\n\n \n\n\n\n\n","repo_name":"mkhi26/Inventario","sub_path":"python/nucleo/Memoria.py","file_name":"Memoria.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11542216514","text":"from optparse import make_option\n\nfrom chef import Node as ChefNode\n\nfrom pymongo.errors import DuplicateKeyError\n\nfrom gecoscc.management import BaseCommand\nfrom gecoscc.utils import (_get_chef_api, register_or_updated_node,\n update_node, SOURCE_DEFAULT, toChefUsername)\n\n\nclass Command(BaseCommand):\n description = \"\"\"\n Import existing nodes in chef server.\n\n \"\"\"\n\n usage = \"usage: %prog config_uri create_chef_nodes --administrator user \"\\\n \"--key file.pem\"\n\n option_list = [\n make_option(\n '-a', '--administrator',\n dest='chef_username',\n action='store',\n help='An existing chef administrator username'\n ),\n make_option(\n '-k', '--key',\n dest='chef_pem',\n action='store',\n help='The pem file that contains the chef administrator private key'\n ),\n ]\n\n required_options = (\n 'chef_username',\n 'chef_pem',\n )\n\n def create_root_ou(self, ou_name):\n data = {'name': ou_name,\n 'type': 'ou'}\n ou = self.db.nodes.find_one(data)\n if ou:\n print(\"OU with name 'ou_0' already exists in mongo\")\n else:\n data.update({'extra': '',\n 'path': 'root',\n 'lock': False,\n 'policies': {},\n 'source': SOURCE_DEFAULT})\n ou_id = self.db.nodes.insert_one(data).inserted_id\n print(\"OU with name 'ou_0' created in mongo\")\n ou = self.db.nodes.find_one({'_id': ou_id})\n return ou\n\n def command(self):\n api = _get_chef_api(self.settings.get('chef.url'),\n toChefUsername(self.options.chef_username),\n self.options.chef_pem,\n self.settings.get('chef.ssl.verify'),\n self.settings.get('chef.version'))\n ou_name = 'ou_0'\n ou = self.create_root_ou(ou_name)\n for node_id in ChefNode.list():\n try:\n node_mongo_id = register_or_updated_node(api, node_id, ou,\n self.db.nodes)\n except DuplicateKeyError:\n node_mongo_id = update_node(api, node_id, ou, self.db.nodes)\n if not node_mongo_id:\n print(\"%s does not exists\" % 
node_id)\n","repo_name":"gecos-team/gecoscc-ui","sub_path":"gecoscc/commands/import_chef_nodes.py","file_name":"import_chef_nodes.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"2220121139","text":"#!/usr/bin/python\n\n######################################\n# Author: Chris Grabosky\n# Email: chris.grabosky@mongodb.com\n# GitHub: graboskyc\n# About: deploys a blueprint\n# Deps: boto3, ConfigParser, paramiko, scp pkgs installed. \n# aws config file installed for user via aws cli tools `aws configure`\n# Config file in ~/.gskyaws.conf\n# Need ~/.ansible.cfg with [defaults] host_key_checking = False\n# Refs: https://github.com/graboskyc/MongoDBInit\n# https://raw.githubusercontent.com/graboskyc/MongoDBInit/master/updateAWSSG.sh\n######################################\n\nimport os\nimport sys\nimport datetime\nimport uuid\nfrom configparser import SafeConfigParser\nimport argparse\nimport yaml\nimport urllib.request\nimport time\nfrom . ChangeManagement import ChangeManagement\nfrom . Table import Table\nfrom . AWS import AWS\nfrom . Tasks import Tasks\nfrom . Logger import Logger\nfrom . Atlas import Atlas\nfrom . Graph import Graph\n\n# recusrsive function to check to make sure instances are up\ndef r_checkStatus(region, uid):\n up=True\n aws = AWS(region)\n reservations = aws.getInstances([{\"Name\":\"tag:use-group\", \"Values\":[uid]}])\n for r in reservations[\"Reservations\"]:\n for i in r[\"Instances\"]:\n if i[\"State\"][\"Name\"] != \"running\":\n up=False\n \n if(not up):\n time.sleep(10)\n sys.stdout.write(\".\")\n sys.stdout.flush()\n r_checkStatus(region, uid)\n\n return reservations\n\ndef cli():\n # Create your blueprint. if not specified, this is what we deploy.\n blueprint = []\n blueprint.append({\"name\":'DB1', \"os\":\"ubuntu\", \"size\":\"t2.micro\"})\n blueprint.append({\"name\":'DB2', \"os\":\"ubuntu\", \"size\":\"t2.micro\"})\n blueprint.append({\"name\":'DB3', \"os\":\"ubuntu\", \"size\":\"t2.micro\"})\n blueprint.append({\"name\":'Ops Mgr', \"os\":\"ubuntu\", \"size\":\"t2.large\"})\n bpname = \"Sample\"\n bpdesc = \"\"\n bppurpose = \"other\"\n\n # parse cli arguments\n parser = argparse.ArgumentParser(description='CLI Tool to easily deploy a blueprint to AWS instances or MongoDB Atlas clusters')\n parser.add_argument('-b', action=\"store\", dest=\"blueprint\", help=\"path to the blueprint\")\n parser.add_argument(\"-s\", \"--sample\", help=\"download a sample blueprint yaml\", action=\"store_true\")\n parser.add_argument('-d', action=\"store\", dest=\"days\", help=\"how many days should we reserve this for before reaping\")\n parser.add_argument('-k', action=\"store\", dest=\"keypath\", help=\"ssh private key location, required if using tasks\")\n parser.add_argument(\"-l\", \"--list\", help=\"instead of deploying, just list deployed instances for a given deployment (use -u flag or defaults to your user)\", action=\"store_true\")\n parser.add_argument(\"-g\", \"--graph\", help=\"instead of deploying, just build ~/graph.html of deployed instances for a given deployment (use -u flag or defaults to your user)\", action=\"store_true\")\n parser.add_argument(\"-p\", \"--pause\", help=\"stop or pause a deployment (use -u flag)\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--restart\", help=\"restart or unpause a deployment (use -u flag)\", action=\"store_true\")\n parser.add_argument(\"-t\", \"--terminate\", help=\"terminate a deployment (use -u flag)\", 
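# A sketch of r_checkStatus() without the recursion (not in the original
# source). The recursive version adds one stack frame per 10 s poll, so the
# default recursion limit (about 1000 frames) is reachable on slow launches,
# and the outermost call returns its own, possibly stale, describe result. An
# iterative loop avoids both and always returns a fresh snapshot:

def check_status_iterative(region, uid):
    aws = AWS(region)
    while True:
        reservations = aws.getInstances(
            [{"Name": "tag:use-group", "Values": [uid]}])
        if all(i["State"]["Name"] == "running"
               for r in reservations["Reservations"]
               for i in r["Instances"]):
            return reservations
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(10)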
action=\"store_true\")\n parser.add_argument('-u', action=\"store\", dest=\"uuid\", help=\"when listing or deleting, the uuid of the deployment\")\n arg = parser.parse_args()\n\n # list a running blueprint\n if arg.list or arg.graph:\n cp = SafeConfigParser()\n cp.read(os.path.expanduser('~') + \"/.aws/config\")\n region = cp.get(\"default\",\"region\")\n aws = AWS(region)\n if arg.uuid != None:\n reservations = aws.getInstances([{\"Name\":\"tag:use-group\", \"Values\":[arg.uuid]}])\n else:\n conf = {}\n with open(os.path.expanduser('~') + \"/.gskyaws.conf\", 'r') as cf:\n for line in cf:\n temp = line.split(\"=\")\n conf[temp[0]] = temp[1].replace('\"',\"\").replace(\"\\n\",\"\")\n reservations = aws.getInstances([{\"Name\":\"tag:owner\", \"Values\":[conf[\"name\"]]}])\n print(\"\")\n print(\"Here is your existing deployment:\")\n \n if arg.list:\n tbl = Table()\n tbl.AddHeader([\"Name\", \"Pub DNS Name\", \"Public Addr\", \"Private Addr\", \"Deployment ID\", \"BP Name\", \"Expires\", \"State\"])\n for r in reservations[\"Reservations\"]:\n for i in r[\"Instances\"]:\n did=\"\"\n name=\"\"\n expire=\"\"\n bpn=\"\"\n pubdns=\"\"\n pubip=\"\"\n privip=\"\"\n for tag in i[\"Tags\"]:\n if tag[\"Key\"] == \"use-group\":\n did=tag[\"Value\"]\n if tag[\"Key\"] == \"blueprint-name\":\n bpn=tag[\"Value\"]\n if tag[\"Key\"] == \"Name\":\n name=tag[\"Value\"]\n if tag[\"Key\"] == \"expire-on\":\n expire=tag[\"Value\"]\n if \"PublicDnsName\" in i:\n pubdns=i[\"PublicDnsName\"]\n if \"PublicIpAddress\" in i:\n pubip=i[\"PublicIpAddress\"]\n if \"PrivateIpAddress\" in i:\n privip=i[\"PrivateIpAddress\"]\n tbl.AddRow([name, pubdns,pubip,privip,did,bpn,expire,i[\"State\"][\"Name\"]])\n tbl.Draw()\n elif arg.graph:\n path = os.path.expanduser('~') + \"/graph.html\"\n g = Graph()\n g.buildChart(path,reservations[\"Reservations\"])\n print(\"Check ~/graph.html for your deployment visualized.\")\n print(\"\")\n sys.exit(0)\n\n # pause or stop\n if arg.pause:\n cp = SafeConfigParser()\n cp.read(os.path.expanduser('~') + \"/.aws/config\")\n region = cp.get(\"default\",\"region\")\n aws = AWS(region)\n if arg.uuid != None:\n reservations = aws.getInstances([{\"Name\":\"tag:use-group\", \"Values\":[arg.uuid]}])\n else:\n print(\"When pausing a blueprint, you must provide the -u option\")\n sys.exit(4)\n print(\"\")\n print(\"Pausing your deployment...\")\n iid=[]\n tbl = Table()\n tbl.AddHeader([\"Name\", \"Pub DNS Name\", \"Deployment ID\", \"BP Name\", \"Expires\", \"State\"])\n for r in reservations[\"Reservations\"]:\n for i in r[\"Instances\"]:\n did=\"\"\n name=\"\"\n expire=\"\"\n bpn=\"\"\n for tag in i[\"Tags\"]:\n if tag[\"Key\"] == \"use-group\":\n did=tag[\"Value\"]\n if tag[\"Key\"] == \"blueprint-name\":\n bpn=tag[\"Value\"]\n if tag[\"Key\"] == \"Name\":\n name=tag[\"Value\"]\n if tag[\"Key\"] == \"expire-on\":\n expire=tag[\"Value\"]\n tbl.AddRow([name, i[\"PublicDnsName\"],did,bpn,expire,\"Pausing\"])\n iid.append(i[\"InstanceId\"])\n response = aws.pauseInstances(iid)\n tbl.Draw()\n print(\"\")\n sys.exit(0)\n\n # unpause or restart\n if arg.restart:\n cp = SafeConfigParser()\n cp.read(os.path.expanduser('~') + \"/.aws/config\")\n region = cp.get(\"default\",\"region\")\n aws = AWS(region)\n if arg.uuid != None:\n reservations = aws.getInstances([{\"Name\":\"tag:use-group\", \"Values\":[arg.uuid]}])\n else:\n print(\"When restarting or unpausing a blueprint, you must provide the -u option\")\n sys.exit(4)\n print(\"\")\n print(\"Unpausing your deployment...\")\n iid=[]\n tbl = Table()\n 
tbl.AddHeader([\"Name\", \"Deployment ID\", \"BP Name\", \"Expires\", \"State\"])\n for r in reservations[\"Reservations\"]:\n for i in r[\"Instances\"]:\n did=\"\"\n name=\"\"\n expire=\"\"\n bpn=\"\"\n for tag in i[\"Tags\"]:\n if tag[\"Key\"] == \"use-group\":\n did=tag[\"Value\"]\n if tag[\"Key\"] == \"blueprint-name\":\n bpn=tag[\"Value\"]\n if tag[\"Key\"] == \"Name\":\n name=tag[\"Value\"]\n if tag[\"Key\"] == \"expire-on\":\n expire=tag[\"Value\"]\n tbl.AddRow([name,did,bpn,expire,\"Starting\"])\n iid.append(i[\"InstanceId\"])\n response = aws.unpauseInstances(iid)\n tbl.Draw()\n print(\"\")\n sys.exit(0)\n\n # unpause or restart\n if arg.terminate:\n cp = SafeConfigParser()\n cp.read(os.path.expanduser('~') + \"/.aws/config\")\n region = cp.get(\"default\",\"region\")\n aws = AWS(region)\n if arg.uuid != None:\n reservations = aws.getInstances([{\"Name\":\"tag:use-group\", \"Values\":[arg.uuid]}])\n else:\n print(\"When terminating a blueprint, you must provide the -u option\")\n sys.exit(4)\n print(\"\")\n print(\"Terminating your deployment...\")\n iid=[]\n tbl = Table()\n tbl.AddHeader([\"Name\", \"Deployment ID\", \"BP Name\", \"Expires\", \"State\"])\n for r in reservations[\"Reservations\"]:\n for i in r[\"Instances\"]:\n did=\"\"\n name=\"\"\n expire=\"\"\n bpn=\"\"\n for tag in i[\"Tags\"]:\n if tag[\"Key\"] == \"use-group\":\n did=tag[\"Value\"]\n if tag[\"Key\"] == \"blueprint-name\":\n bpn=tag[\"Value\"]\n if tag[\"Key\"] == \"Name\":\n name=tag[\"Value\"]\n if tag[\"Key\"] == \"expire-on\":\n expire=tag[\"Value\"]\n tbl.AddRow([name,did,bpn,expire,\"Terminating\"])\n iid.append(i[\"InstanceId\"])\n response = aws.terminateInstances(iid)\n tbl.Draw()\n print(\"\")\n sys.exit(0)\n\n # pull sample yaml file from github as reference\n if arg.sample:\n print(\"Downloading file...\")\n sfile = urllib.request.URLopener()\n sfile.retrieve(\"https://raw.githubusercontent.com/graboskyc/DeployBlueprint/master/Samples/sampleblueprint.yaml\", os.path.expanduser('~') + \"/sample.yaml\")\n print(\"Check your home directory for sample.yaml\")\n sys.exit(0)\n\n # if they specifify a yaml, use that\n # otherwise we will use the hard coded blueprint above\n if (arg.blueprint != None):\n print(\"Using YAML file provided.\")\n with open(arg.blueprint,\"r\") as s:\n try:\n y = yaml.load(s.read())\n except:\n print(\"Error parsing YAML file!\")\n sys.exit(2)\n \n if y[\"apiVersion\"] != \"v1\":\n print(\"UNSUPPORTED VERSION OF BLUEPRINT YAML. 
THIS VERSION USES v1 ONLY.\")\n sys.exit(5)\n else:\n print(\"Using Blueprint format \" + y[\"apiVersion\"])\n\n blueprint = []\n sblueprint = []\n if \"resources\" in y:\n blueprint = y[\"resources\"]\n if \"services\" in y:\n sblueprint = y[\"services\"]\n if \"metadata\" in y:\n if \"blueprint_name\" in y[\"metadata\"]:\n bpname = y[\"metadata\"][\"blueprint_name\"]\n if \"blueprint_description\" in y[\"metadata\"]:\n bpdesc = y[\"metadata\"][\"blueprint_description\"]\n if \"purpose\" in y[\"metadata\"]:\n bppurpose = y[\"metadata\"][\"purpose\"]\n\n\n # always prepend a random 8 characters \n # makes it easier to find and be grouped later\n # and track whether any failed deploys\n uid = str(uuid.uuid4())[:8]\n success=True\n conf = {}\n resdays = 7\n\n # logging tool\n log = Logger(uid)\n\n # default to 7 day reservation, otherwise take args\n if (arg.days != None):\n resdays = int(arg.days)\n\n # do not change order!\n # values get overridden below\n t = []\n t.append( {'Key':'Name', 'Value':'from the api'} )\n t.append( {'Key':'owner', 'Value':'some.guy'} )\n t.append( {'Key':'expire-on', 'Value':str(datetime.date.today()+ datetime.timedelta(days=resdays))} )\n t.append( {'Key':'use-group', 'Value':uid} )\n t.append( {'Key':'blueprint-name', 'Value':bpname})\n t.append( {'Key':'blueprint-desc', 'Value':bpdesc[:255]})\n t.append( {'Key':'purpose', 'Value':bppurpose})\n\n # parse the config files\n if (os.path.isfile(os.path.expanduser('~') + \"/.gskyaws.conf\") and os.path.isfile(os.path.expanduser('~') + \"/.aws/config\")):\n # my custom config file parsing\n with open(os.path.expanduser('~') + \"/.gskyaws.conf\", 'r') as cf:\n for line in cf:\n temp = line.split(\"=\")\n conf[temp[0]] = temp[1].replace('\"',\"\").replace(\"\\n\",\"\")\n \n # config files from aws cli utility\n cp = SafeConfigParser()\n cp.read(os.path.expanduser('~') + \"/.aws/config\")\n region = cp.get(\"default\",\"region\")\n\n else:\n # configs were not present\n print(\"\")\n print(\"You need your config in ~/.gskyaws.conf.\")\n print(\"See: https://raw.githubusercontent.com/graboskyc/MongoDBInit/master/updateAWSSG.sh\")\n print(\"Create ~/.gskyaws.conf with values:\")\n print('sgID=\"sg-yoursgidhere\"')\n print('keypair=\"yourawskeypairname\"')\n print('name=\"firstname.lastname\"')\n print(\"\")\n print(\"And you need to run `aws configure` to configure that as well\")\n print(\"\")\n sys.exit(1)\n\n\n # where to deploy\n # remember, you need ~/.aws/credentials set!\n aws = AWS(region)\n\n # being deployment of each instance\n print(\"Deploying Instances...\")\n tbl = Table()\n tbl.AddHeader([\"Instance ID\", \"Name\", \"Op System\", \"Size\", \"Succ/Fail\"])\n for resource in blueprint:\n print(\"Trying to deploy \" + resource[\"name\"])\n try:\n # actually deploy\n inst = aws.makeInstance(aws.getAMI(resource[\"os\"])[\"id\"], resource[\"size\"], [conf['sgID']], conf['keypair'])\n time.sleep(1)\n # update tags for tracking and reaping\n t[0] = {'Key':'Name', 'Value':uid + \"_\" +resource[\"name\"]} \n t[1] = {'Key':'owner', 'Value': conf[\"name\"]} \n inst[0].create_tags(Tags=t)\n resource[\"id\"] = inst[0].id\n resource[\"resourcename\"] = uid + \"_\" +resource[\"name\"]\n if \"overrideuser\" in resource:\n resource[\"username\"] = resource[\"overrideuser\"]\n else:\n resource[\"username\"] = aws.getAMI(resource[\"os\"])[\"user\"]\n tbl.AddRow([inst[0].id, uid + \"_\" +resource[\"name\"], resource[\"os\"], resource[\"size\"], \"Success\"])\n except:\n success=False\n tbl.AddRow([resource[\"name\"], uid + 
\"_\" +resource[\"name\"], resource[\"os\"], resource[\"size\"], \"Fail\"])\n log.write(\"ERROR!\")\n log.write(str(sys.exc_info()[0]))\n log.write(str(sys.exc_info()[1]))\n log.write(str(sys.exc_info()[2]))\n\n print(\"\")\n print(\"Results:\")\n print(\"\")\n\n tbl.Draw()\n log.writeSection(\"Deploying Instances\", tbl.Return())\n\n tbl.Clear()\n print(\"Deploying services...\")\n log.writeSection(\"Deploying Atlas\", \"\")\n tbl.AddHeader([\"Name\", \"Type\", \"Cloud\", \"Size\", \"Status\"])\n atlas = Atlas(conf[\"atlasusername\"], conf[\"atlasapikey\"], uid)\n for service in sblueprint:\n print(\"Trying to deploy \" + service[\"name\"])\n log.writeTimestamp(\"Trying to deploy\" + service[\"name\"])\n backup = False\n bi = False\n enc = False\n if \"backup\" in service:\n backup = service[\"backup\"]\n if \"biconnector\" in service:\n bi = service[\"backup\"]\n if \"encrypted\" in service:\n enc = service[\"encrypted\"]\n if \"iops\" in service:\n iops = int(service[\"iops\"])\n if \"disksize\" in service:\n ds = service[\"disksize\"]\n\n worked,output = atlas.createCluster(service[\"name\"], service[\"groupid\"], service[\"region\"], service[\"type\"], service[\"version\"], service[\"cloud\"], service[\"size\"], service[\"rscount\"], service[\"shards\"], ds, iops, backup, bi, enc)\n\n if worked:\n tbl.AddRow([service[\"name\"], service[\"type\"], service[\"cloud\"], service[\"size\"], \"Deploying...\"])\n else:\n tbl.AddRow([service[\"name\"], service[\"type\"], service[\"cloud\"], service[\"size\"], \"Failed!\"])\n log.write(\"ERROR!\")\n log.write(output)\n log.write(tbl.Return())\n print(\"Services deployed:\")\n tbl.Draw()\n\n # wait for everything to come up\n print(\"\")\n sys.stdout.write(\"Waiting for successfully deployed instances to come up...\")\n sys.stdout.flush()\n reservations = r_checkStatus(region, uid)\n print(\"\")\n print(\"Instances are running...\")\n print(\"\")\n sys.stdout.write(\"Waiting for successfully deployed services to come up...\")\n sys.stdout.flush()\n clusters = atlas.r_waitForCluster()\n print(\"Everything is ready.\")\n print(\"Building Post-Configuration Plan...\")\n print(\"\")\n\n # build the task list\n tasks=Tasks()\n time.sleep(5)\n reservations = r_checkStatus(region, uid)\n clusters = atlas.r_waitForCluster()\n for resource in blueprint:\n i=0\n # find the DNS Name\n for r in reservations[\"Reservations\"]:\n for i in r[\"Instances\"]:\n if i[\"InstanceId\"] == resource[\"id\"]:\n resource[\"dns\"] = i[\"PublicDnsName\"]\n \n if \"tasks\" in resource:\n tl=[]\n for task in resource[\"tasks\"]:\n task[\"resourceid\"] = resource[\"id\"]\n task[\"resourcedeployedname\"] = resource[\"resourcename\"]\n task[\"resourcename\"] = resource[\"name\"]\n task[\"dns\"] = resource[\"dns\"]\n task[\"status\"] = \"Pending\"\n task[\"username\"] = resource[\"username\"]\n tl.append(task)\n tasks.addTaskGroup(int(resource[\"postinstallorder\"]), tl)\n for service in sblueprint:\n if \"tasks\" in service:\n tl=[]\n for task in service[\"tasks\"]:\n task[\"resourceid\"] = \"\"\n task[\"resourcedeployedname\"] = service[\"name\"]\n task[\"resourcename\"] = service[\"name\"]\n task[\"dns\"] = \"\"\n task[\"status\"] = \"Pending\"\n task[\"username\"] = \"\"\n tl.append(task)\n tasks.addTaskGroup(int(service[\"postinstallorder\"]), tl)\n\n # draw user output\n i=1\n tbl.Clear()\n tbl.AddHeader([\"Task Number\", \"Name\", \"ID\", \"Public DNS Name\", \"Type\", \"Description\", \"Status\"])\n for tl in tasks.getTasks():\n for t in tl:\n tbl.AddRow([str(i), 
t[\"resourcedeployedname\"],t[\"resourceid\"], t[\"dns\"], t[\"type\"],t[\"description\"], t[\"status\"]])\n i=i+1\n print(\"Plan created:\")\n print(\"\")\n tbl.Draw()\n\n log.writeSection(\"Post-Deploy Plan\", tbl.Return())\n\n if len(tasks.getTasks()) == 0:\n print(\"\")\n print(\"No tasks to do.\")\n log.write(\"no tasks to do.\")\n print(\"\")\n else:\n i=1\n cm = ChangeManagement()\n for tl in tasks.getTasks():\n for t in tl:\n print(\"Beginning Task %s (%s) on %s...\" % (str(i), t[\"description\"], t[\"resourcedeployedname\"]))\n if t[\"type\"] == \"playbook\":\n t[\"status\"] = \"Running\"\n try:\n log.writeTimestamp(\"BEGINING TASK \" + str(i))\n result = cm.runPlaybook(t[\"url\"], t[\"dns\"], uid, i, arg.keypath, t[\"username\"])\n log.write(result)\n t[\"status\"] = \"Completed\"\n except:\n t[\"status\"] = \"Failed\"\n log.writeTimestamp(\"Tried running task \" + str(i) + \" on \" + t[\"dns\"])\n log.write(\"ERROR:\")\n log.write(str(sys.exc_info()[0]))\n elif t[\"type\"] == \"shell\":\n t[\"status\"] = \"Running\"\n try:\n log.writeTimestamp(\"BEGINING TASK \" + str(i))\n result = cm.runBashScript(t[\"url\"], t[\"dns\"], uid, i, arg.keypath, t[\"username\"])\n log.write(result)\n t[\"status\"] = \"Completed\"\n except:\n t[\"status\"] = \"Failed\"\n log.writeTimestamp(\"Tried running task \" + str(i) + \" on \" + t[\"dns\"])\n log.write(\"ERROR:\")\n log.write(str(sys.exc_info()))\n elif t[\"type\"] == \"local\":\n t[\"status\"] = \"Running\"\n try:\n log.writeTimestamp(\"BEGINING TASK \" + str(i))\n result=cm.runLocal(t[\"cmd\"], uid, i)\n log.write(result)\n t[\"status\"] = \"Completed\"\n except:\n t[\"status\"] = \"Failed\"\n log.writeTimestamp(\"Tried running task \" + str(i) + \" on \" + t[\"resourcedeployedname\"])\n log.write(\"ERROR:\")\n log.write(str(sys.exc_info()))\n else:\n t[\"status\"] = \"TypeError\"\n log.writeTimestamp(\"Tried running task \" + str(i) + \" on \" + t[\"dns\"])\n log.write(\"ERROR:\\nUnsupported task type.\")\n i=i+1\n print(\"\")\n\n # print results\n i=1\n tbl.Clear()\n tbl.AddHeader([\"Task Number\", \"Name\", \"ID\", \"Public DNS Name\", \"Type\", \"Description\", \"Status\"])\n for tl in tasks.getTasks():\n for t in tl:\n tbl.AddRow([str(i), t[\"resourcedeployedname\"],t[\"resourceid\"], t[\"dns\"], t[\"type\"],t[\"description\"], t[\"status\"]])\n i=i+1\n print(\"Plan results:\")\n print(\"\")\n tbl.Draw()\n log.write(tbl.Return())\n\n # completed\n print(\"\")\n if success:\n print(\"Blueprint Successfully Deployed!\")\n log.writeSection(\"Completion\", \"Blueprint Successfully Deployed!\")\n else:\n print(\"The blueprint may not have been successfully deployed.\")\n log.writeSection(\"Completion\", \"The blueprint may not have been successfully deployed.\")\n print(\"\")","repo_name":"graboskyc/DeployBlueprint","sub_path":"DeployBlueprint/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":22476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74739586183","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 24 13:45:32 2020\n\n@author: user\n\"\"\"\n\n\n#import tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nfrom os import listdir\nfrom os.path import isfile, join\nimport matplotlib.pyplot as plt\nfrom natsort import natsort_keygen, ns\nfrom skimage import measure\nimport scipy\nimport cv2 as cv\nfrom natsort import natsort_keygen, ns\n\nfrom plot_functions_CLEANED import *\nfrom data_functions_CLEANED 
import *\nfrom data_functions_3D import *\n#from UNet import *\n#from UNet_3D import *\nimport glob, os\nnatsort_key1 = natsort_keygen(key = lambda y: y.lower()) # natural sorting order\n\n#from csbdeep.internals import predict\nfrom tifffile import *\nimport tkinter\nfrom tkinter import filedialog\n\n\n\"\"\" Required to allow correct GPU usage ==> or else crashes \"\"\"\n# import tensorflow as tf\n# config = tf.ConfigProto()\n# config.gpu_options.allow_growth = True\n# tf.keras.backend.set_session(tf.Session(config=config))\n\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n#from UNet_pytorch import *\nfrom UNet_pytorch_online import *\nfrom PYTORCH_dataloader import *\nfrom UNet_functions_PYTORCH import *\n\ntorch.backends.cudnn.benchmark = True \ntorch.backends.cudnn.enabled = True # new thing? what do? must be True\n\n\"\"\" Define GPU to use \"\"\"\ndevice = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\n\n\"\"\" Import network \"\"\"\n#unet = UNet_online()\n\n\"\"\" Network Begins: \"\"\"\n#s_path = './Checkpoints_for_GITHUB/'\n#s_path = './(4) Checkpoints_PYTORCH_5x5_256x64_no_CONVTRANSP_matched_no_DILATION_COMPLEX/'\n#s_path = './(12) Checkpoints_TITAN_5x5_256x64_NO_transforms_AdamW_spatial/'\n#s_path = './(18) Checkpoints_TITAN_NO_transforms_AdamW_batch_norm_SPATIAL/' \n#s_path = './(16) Checkpoints_TITAN_YES_transforms_AdamW_SLOWER_switchable_BN/'\n\ns_path = './(19) Checkpoints_TITAN_NO_transforms_AdamW_batch_norm_CLEAN_DATA/'\n\n\n#s_path = './(20) Checkpoints_PYTORCH_NO_transforms_AdamW_batch_norm_CLEAN_DATA/'\n\ns_path = './(21) Checkpoints_PYTORCH_NO_transforms_AdamW_batch_norm_CLEAN_DATA_LARGE_NETWORK/'\n\n\n\n\n\"\"\" AMOUNT OF EDGE TO ELIMINATE \n\n\n scaling???\n\"\"\"\n\n\nimport argparse\nfrom pyimq import filters, script_options, utils, myimage\ndef get_quality_script_options(arguments):\n parser = argparse.ArgumentParser(\n description=\"Command line arguments for the \"\n \"image quality ranking software\"\n )\n\n parser.add_argument(\n \"--file\",\n help=\"Defines a path to the image files\",\n default=None\n )\n parser.add_argument(\n \"--working-directory\",\n dest=\"working_directory\",\n help=\"Defines the location of the working directory\",\n default=\"/home/sami/Pictures/Quality\"\n )\n parser.add_argument(\n \"--mode\",\n choices=[\"file\", \"directory\", \"analyze\", \"plot\"],\n action=\"append\",\n help=\"The argument containing the functionality of the main program\"\n \"You can concatenate actions by defining multiple modes in a\"\n \"single command, e.g. 
--mode=directory --mode=analyze\"\n )\n # Parameters for controlling the way plot functionality works.\n parser.add_argument(\n \"--result\",\n default=\"average\",\n choices=[\"average\", \"fskew\", \"ientropy\", \"fentropy\", \"fstd\",\n \"fkurtosis\", \"fpw\", \"fmean\", \"icv\", \"meanbin\"],\n help=\"Tell how you want the results to be calculated.\"\n )\n parser.add_argument(\n \"--npics\",\n type=int,\n default=9,\n help=\"Define how many images are shown in the plots\"\n )\n\n parser = filters.get_common_options(parser)\n parser = myimage.get_options(parser)\n return parser.parse_args(arguments)\n\n\n\n\noverlap_percent = 0.5\ninput_size = 256\ndepth = 64\nnum_truth_class = 2\n\n\n# \"\"\" load mean and std \"\"\" \ninput_path = './normalize_pytorch_CLEANED/'\nmean_arr = np.load(input_path + 'mean_VERIFIED.npy')\nstd_arr = np.load(input_path + 'std_VERIFIED.npy')\n\n\n\"\"\" Select multiple folders for analysis AND creates new subfolder for results output \"\"\"\nroot = tkinter.Tk()\n# get input folders\nanother_folder = 'y';\nlist_folder = []\ninput_path = \"./\"\n\ninitial_dir = '/media/user/storage/Data/'\nwhile(another_folder == 'y'):\n input_path = filedialog.askdirectory(parent=root, initialdir= initial_dir,\n title='Please select input directory')\n input_path = input_path + '/'\n \n print('Do you want to select another folder? (y/n)')\n another_folder = input(); # currently hangs forever\n #another_folder = 'y';\n\n list_folder.append(input_path)\n initial_dir = input_path\n \n\n\"\"\" Loop through all the folders and do the analysis!!!\"\"\"\nfor input_path in list_folder:\n foldername = input_path.split('/')[-2]\n sav_dir = input_path + '/' + foldername + '_output_PYTORCH_RETRAINED_105834'\n\n \"\"\" For testing ILASTIK images \"\"\"\n # images = glob.glob(os.path.join(input_path,'*.tif')) # can switch this to \"*truth.tif\" if there is no name for \"input\"\n # images.sort(key=natsort_keygen(alg=ns.REAL)) # natural sorting\n # examples = [dict(input=i,truth=i.replace('.tif','_truth.tif'), ilastik=i.replace('.tif','_single_Object Predictions_.tiff')) for i in images]\n\n\n images = glob.glob(os.path.join(input_path,'*_single_channel.tif')) # can switch this to \"*truth.tif\" if there is no name for \"input\"\n images.sort(key=natsort_keygen(alg=ns.REAL)) # natural sorting\n examples = [dict(input=i,truth=i.replace('_single_channel.tif','_truth.tif'), ilastik=i.replace('_single_channel.tif','_single_Object Predictions_.tiff')) for i in images]\n\n\n # images = glob.glob(os.path.join(input_path,'*_RAW_REGISTERED.tif')) # can switch this to \"*truth.tif\" if there is no name for \"input\"\n # images.sort(key=natsort_keygen(alg=ns.REAL)) # natural sorting\n # examples = [dict(input=i,truth=i.replace('_RAW_REGISTERED.tif','_TRUTH_REGISTERED.tif'), ilastik=i.replace('_RAW_REGISTERED.tif','_single_Object Predictions_.tiff')) for i in images]\n\n\n # images = glob.glob(os.path.join(input_path,'*_RAW_REGISTERED_substack_1_110.tif')) # can switch this to \"*truth.tif\" if there is no name for \"input\"\n # images.sort(key=natsort_keygen(alg=ns.REAL)) # natural sorting\n # examples = [dict(input=i,truth=i.replace('_RAW_REGISTERED_substack_1_110.tif','_TRUTH_REGISTERED_substack_1_11_m_ilastik.tif'), ilastik=i.replace('_RAW_REGISTERED.tif','_single_Object Predictions_.tiff')) for i in images]\n\n \n\n \n try:\n # Create target Directory\n os.mkdir(sav_dir)\n print(\"Directory \" , sav_dir , \" Created \") \n except FileExistsError:\n print(\"Directory \" , sav_dir , \" already 
exists\")\n \n sav_dir = sav_dir + '/'\n \n # Required to initialize all\n batch_size = 1;\n \n batch_x = []; batch_y = [];\n weights = [];\n \n plot_jaccard = [];\n \n output_stack = [];\n output_stack_masked = [];\n all_PPV = [];\n input_im_stack = [];\n for i in range(len(examples)):\n \n \n \n \"\"\" TRY INFERENCE WITH PATCH-BASED analysis from TORCHIO \"\"\"\n with torch.set_grad_enabled(False): # saves GPU RAM \n input_name = examples[i]['input'] \n input_im = open_image_sequence_to_3D(input_name, width_max='default', height_max='default', depth='default')\n \n from skimage import filters\n \"\"\" Find BRISQUE quality \"\"\"\n # plt.figure()\n # append_mean_SNR = []\n # for val in range(10, 50, 10):\n # #val = 100\n # all_SNR = [];\n # for depth in range(0, len(input_im) - 33, 33):\n \n # first_slices= input_im[depth:depth + 33, ...]\n # max_first = plot_max(first_slices, ax=0, plot=0)\n # im = Image.fromarray(np.uint8(max_first))\n \n \n \n # xres = 0.83\n # yres = 0.83\n \n # signal = np.mean(np.where(max_first > val))\n # print(signal)\n # noise = np.std(np.where(max_first < val))\n # print(noise)\n \n # SNR = 10 * math.log10(signal/noise)\n \n # all_SNR.append(SNR)\n \n # plt.plot(all_SNR)\n # append_mean_SNR.append(all_SNR)\n \n # #zzz\n # mean_SNR = np.nanmean(append_mean_SNR, axis=0)\n # # #save_snr = mean_SNR\n #zzz\n \n \n \"\"\" Using otsu threshold \"\"\"\n from skimage.filters import threshold_otsu\n from skimage.filters import threshold_triangle\n plt.figure()\n append_mean_SNR = []\n #for val in range(10, 240, 10):\n #val = 100\n all_SNR = [];\n \n\n first_slices= input_im[depth:depth + 33, ...]\n max_first = plot_max(first_slices, ax=0, plot=0)\n \n thresh = threshold_otsu(input_im)\n for depth in range(0, len(input_im) - 33, 33):\n \n first_slices= input_im[depth:depth + 33, ...]\n max_first = plot_max(first_slices, ax=0, plot=0)\n #im = Image.fromarray(np.uint8(max_first))\n \n \n #thresh = threshold_otsu(max_first)\n xres = 0.83\n yres = 0.83\n \n signal = np.mean(np.where(max_first > thresh))\n noise = np.std(np.where(max_first < thresh))\n \n SNR = 10 * math.log10(signal/noise)\n \n all_SNR.append(SNR)\n \n plt.plot(all_SNR)\n #append_mean_SNR.append(all_SNR)\n \n mean_SNR = all_SNR\n #save_snr = mean_SNR \n \n \n zzz\n \"\"\" Try shannon entropy \"\"\"\n # all_entropy = []\n # for depth in range(0, len(input_im) - 33, 33):\n\n # first_slices= input_im[depth:depth + 33, ...]\n # max_first = plot_max(first_slices, ax=0, plot=0)\n # #normalized = (max_first-np.min(max_first))/(np.max(max_first)-np.min(max_first))\n # #im = Image.fromarray\n # # max_first += 1\n\n # # pA = max_first/max_first.sum()\n # # Shannon2 = -np.nansum(pA*np.log2(pA))\n \n # import skimage.measure\n # entropy = skimage.measure.shannon_entropy(max_first)\n # plt.figure(); plt.imshow(max_first)\n # all_entropy.append(entropy)\n # print(entropy)\n\n\n # \"\"\" Try with local crops \"\"\"\n # first_slices= input_im[0:33, ...]\n # max_first = plot_max(first_slices, ax=0, plot=0)\n # #plt.figure(); plt.imshow(max_first)\n \n \n # crop_1 = max_first[200:200 + 160, 80:80 + 160]\n # entropy = skimage.measure.shannon_entropy(crop_1)\n # entropy = np.std(crop_1)\n # print(entropy)\n # plt.figure(); plt.imshow(crop_1); plt.title('Entropy: ' + str(entropy))\n\n # crop_2 = max_first[320:320 + 160, 320:320 + 160]\n # entropy = skimage.measure.shannon_entropy(crop_2)\n # entropy = np.std(crop_2)\n # print(entropy)\n # plt.figure(); plt.imshow(crop_2); plt.title('Entropy: ' + str(entropy))\n\n \n # # from 
skimage.util import random_noise\n # # plt.close('all')\n \n # # for i in np.arange(0, 4, 0.1):\n # # crop_2_noise = random_noise(crop_2/255, mode='gaussian', var = i)\n # # entropy = skimage.measure.shannon_entropy(crop_2_noise)\n # # print(entropy)\n # # plt.figure(); plt.imshow(crop_2_noise); plt.title('Entropy: ' + str(entropy))\n \n\n\n # crop_3 = max_first[420:420 + 160, 420:420 + 160]\n # entropy = skimage.measure.shannon_entropy(crop_3)\n # entropy = np.std(crop_3)\n # print(entropy)\n # plt.figure(); plt.imshow(crop_3); plt.title('Entropy: ' + str(entropy))\n \n # \"\"\" Try with local crops \"\"\"\n # first_slices= input_im[100:133, ...]\n # max_first = plot_max(first_slices, ax=0, plot=0)\n # #plt.figure(); plt.imshow(max_first)\n \n \n # crop_1 = max_first[200:200 + 160, 80:80 + 160]\n # entropy = skimage.measure.shannon_entropy(crop_1)\n # entropy = np.std(crop_1)\n # print(entropy)\n # plt.figure(); plt.imshow(crop_1); plt.title('Entropy: ' + str(entropy))\n\n # crop_2 = max_first[320:320 + 160, 320:320 + 160]\n # entropy = skimage.measure.shannon_entropy(crop_2)\n # entropy = np.std(crop_2)\n # print(entropy)\n # plt.figure(); plt.imshow(crop_2); plt.title('Entropy: ' + str(entropy))\n\n # crop_3 = max_first[420:420 + 160, 420:420 + 160]\n # entropy = skimage.measure.shannon_entropy(crop_3)\n # entropy = np.std(crop_3)\n # print(entropy)\n # plt.figure(); plt.imshow(crop_3); plt.title('Entropy: ' + str(entropy))\n \n \n \n \"\"\" Set globally \"\"\"\n plt.rc('xtick',labelsize=16)\n plt.rc('ytick',labelsize=16)\n ax_title_size = 18\n leg_size = 16\n plt.rcParams['figure.dpi'] = 300\n \n \"\"\" Stop here and run again with low SNR\"\"\"\n plt.figure(figsize=(4,4));\n x_axis = [100, 200, 300, 400]\n plt.plot(x_axis, save_snr[0:len(x_axis)])\n plt.plot(x_axis, mean_SNR[0:len(x_axis)])\n \n plt.ylim([0, 3])\n \n ax = plt.gca()\n\n\n ax.legend(['optimal quality', 'degraded quality'], frameon=False, fontsize=leg_size, loc='lower left')\n\n #plt.xlabel(\"proportion of tracks\", fontsize=14)\n plt.xlabel('Depth (\\u03bcm)', fontsize=ax_title_size)\n plt.ylabel(\"SNR\", fontsize=ax_title_size)\n #plt.yscale(\"log\")\n #plt.yticks(np.arange(0, max(errs)+1, 5))\n rs = ax.spines[\"right\"]; rs.set_visible(False); ts = ax.spines[\"top\"]; ts.set_visible(False)\n plt.tight_layout()\n plt.savefig(sav_dir + 'SNR_comparison' + '.png') \n zzz\n #zzz\n \n \n \n ","repo_name":"yxu233/Xu_Bergles_2021_Oligo_Track","sub_path":"Oligo_Track/functional/find_image_quality.py","file_name":"find_image_quality.py","file_ext":"py","file_size_in_byte":14805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"43678785637","text":"\n\n\ngrps = data.groupby(data.index) \n[(g[0], g[1].Time[0], g[1].Time[-1], g[1].Open[0], max(g[1].High), min(g[1].Low), g[1].Close[-1]) for g in grps]\noneD = pd.DataFrame.from_records(one_day_data, columns=['Date', 'Start', 'End', 'Open', 'High', 'Low', 'Close'], index='Date')\n\n#Clacola High - Open => Minimum gain\noneD['HO'] = (oneD['High'] - oneD['Open'])*10000\n\n#Calcola Open - Low => Stop Loss\n\n\nimport pandas as pd\nimport numpy as np\n\n#Generate second data from M1 candle\ndef generate_data(row):\n\t# Create an array of 60 random number, ranging [Low,High)\n\tgdf=np.random.random(60)*(row.High-row.Low)+row.Low\n\t# Adjust open, close and max values\n\tgdf[0]=row.Open\n\tgdf[-1]=row.Close\n\tgdf[gdf.argmax()]=row.High\n\t# Return a pandas dataframe with generated data\n\treturn pd.DataFrame(gdf, 
index=pd.date_range(start=row.XDate, freq='s', periods=60))\n\n#Read data from file\ndata = pd.read_csv(\"data/DAT_ASCII_EURGBP_M1_201605.csv\", names=['Date', 'Open', 'High', 'Low', 'Close', 'Vol'], index_col=[0], parse_dates=True, sep=';')\ndata['XDate']=data.index #Replicate the index as a value to use apply\n#Generate full data\n#.ix and DataFrame.append were removed in newer pandas; use .iloc and pd.concat instead\nframes=[]\nfor i in range(len(data)):\n\tframes.append(generate_data(data.iloc[i]))\ngen_data=pd.concat(frames)\n\n#gen_data.to_csv('data/DAT_ASCII_EURGBP_M1_201605_generated.csv')\n","repo_name":"rek87/finance","sub_path":"pandas_ex.py","file_name":"pandas_ex.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}\n{"seq_id":"8594452651","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 23 12:50:34 2016\n\n@author: alex\n\"\"\"\n\nfrom AlexRobotics.dynamic import Manipulator as M\nfrom AlexRobotics.control import DPO as DPO\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Define dynamic system\nR = M.OneLinkManipulator()\n\n# Define controller\ncost_function = 'quadratic'\nQLearningAlgo = DPO.QLearning1DOF( R , cost_function )\n\n\npath = 'data/'\nname = 'R1_Qlearning' + cost_function \n\n#n_steps = 10000\nn_steps = 1\n\nQLearningAlgo.first_step()\nQLearningAlgo.load_data( name )\n\nx0 = np.array( [ 3 , 0 ] )\nQLearningAlgo.x0 = x0\n\nQLearningAlgo.training( n_steps , True , True )\n\nQLearningAlgo.save_data( name )\n\nQLearningAlgo.plot_J()\n\nR.plotAnimation( x0 )","repo_name":"ali493/pyro","sub_path":"old/projects/6.884/onelinkmanipulator_with_Qlearning.py","file_name":"onelinkmanipulator_with_Qlearning.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}\n{"seq_id":"3034940876","text":"import unittest\r\nfrom testfixtures import compare\r\nimport copy\r\n\r\ndef find_stable_matching(men_preferences, women_preferences):\r\n    men_preferences = copy.deepcopy(men_preferences)\r\n    women_preferences = copy.deepcopy(women_preferences) \r\n    n = len(men_preferences)\r\n    k = len(women_preferences)\r\n    active_men = [True] * n\r\n    proposals = [[False] * n for i in range(k)]\r\n    while abs(n - k) != sum(active_men) != 0: # loop while any matchable man is still free (|n - k| men stay unmatched when group sizes differ)\r\n        for man, is_active in enumerate(active_men):\r\n            if is_active: # every free man proposes to his next choice\r\n                man_pref = men_preferences[man]\r\n                proposals[man_pref.pop(0)][man] = True #make proposals\r\n        active_men = [True] * n\r\n        for woman, _ in enumerate(proposals):\r\n            for man in women_preferences[woman]:\r\n                if proposals[woman][man]:\r\n                    proposals[woman] = [False] * n\r\n                    proposals[woman][man] = True\r\n                    active_men[man] = False # her highest-ranked proposer becomes tentatively engaged (\"maybe\")\r\n                    break\r\n    stable_matching = []\r\n    for woman, woman_proposal in enumerate(proposals):\r\n        for man, yes in enumerate(woman_proposal):\r\n            if yes:\r\n                stable_matching.append((man, woman))\r\n    return stable_matching\r\n\r\n\r\nclass TestMarriage(unittest.TestCase):\r\n    def setUp(self):\r\n        self.minus_one = lambda arr: list(map(lambda x: list(map(lambda y: y - 1, x)), arr))\r\n        pass\r\n    def test_marriage(self):\r\n        men_preferences = [[2,1,3,4], [4, 1, 2, 3], [1, 3, 2, 4], [2, 3, 1, 4]]\r\n        women_preferences = [[1, 3, 2, 4], [3, 4, 1, 2], [4, 2, 3, 1], [3, 2, 1, 4]]\r\n        men_preferences = self.minus_one(men_preferences)\r\n        women_preferences = self.minus_one(women_preferences)\r\n        
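# pairs are (man, woman), 0-indexed; Gale-Shapley favours the proposing side, so swapping which group proposes in the second call can yield a different stable matching\r\n        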
self.assertCountEqual(find_stable_matching(men_preferences, women_preferences), [(0,0),(1,3),(2,2),(3,1)])\r\n self.assertCountEqual(find_stable_matching(women_preferences, men_preferences), [(0,0),(1,2),(2,3),(3,1)])\r\n\r\n def test_unequal_marriage(self):\r\n men_preferences = [[0,2,1],[1,0,2]]\r\n women_preferences = [[1,0], [0, 1], [0,1]] \r\n self.assertCountEqual(find_stable_matching(men_preferences, women_preferences), [(0,0), (1,1)])\r\n self.assertCountEqual(find_stable_matching(women_preferences, men_preferences), [(2,0),(0,1)])\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()","repo_name":"bartergit/algorithms","sub_path":"stable-matching-problem/stable_matching_problem.py","file_name":"stable_matching_problem.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40470005856","text":"import datetime\n\nimport sqlalchemy\n\nfrom service import Base_db\n\n\nclass Data(Base_db):\n __tablename__ = 'data'\n id = sqlalchemy.Column(\"id\", sqlalchemy.Integer, primary_key=True, autoincrement=True)\n author_id = sqlalchemy.Column(\"author_id\", sqlalchemy.Integer)\n key = sqlalchemy.Column(\"key\", sqlalchemy.String(100))\n value = sqlalchemy.Column(\"value\", sqlalchemy.String(100))\n time_creation = sqlalchemy.Column(\"time_creation\", sqlalchemy.DateTime)\n\n def __init__(self, author_id: int, key: str, value: str, time_creation: datetime):\n self.author_id = author_id\n self.key = key\n self.value = value\n self.time_creation = time_creation\n","repo_name":"Serzho/Database_Server","sub_path":"server/data_table.py","file_name":"data_table.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13033256617","text":"\"\"\" Implementations of support chart types\n\nThis module contains functions for chart types.\n\n\"\"\"\n\nimport datetime\nimport typing\nfrom typing import Mapping\n\nimport attr\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nimport matplotlib.units as munits\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import colors, ticker, transforms\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.colors import Colormap\n\nfrom ._base_chart import _BaseChart\n\n# For conciseDateFormatter for all plots https://matplotlib.org/3.1.0/gallery/ticks_and_spines/date_concise_formatter.html\nconverter = mdates.ConciseDateConverter()\nmunits.registry[np.datetime64] = converter\nmunits.registry[datetime.date] = converter\nmunits.registry[datetime.datetime] = converter\n\n\n@attr.s()\nclass BarChartRace(_BaseChart):\n \"\"\" BarChart implementation for bar chart races\n\n Args:\n BaseChart (BaseChart): Base class shared by all chart types\n\n Returns:\n BarChart: Instance of BarChart allowing for inclusion in subplot charts or animating with .save()\n \"\"\"\n\n orientation: str = attr.ib()\n sort: str = attr.ib()\n label_bars: bool = attr.ib()\n bar_label_size: typing.Union[int, float] = attr.ib()\n n_visible: int = attr.ib()\n fixed_order: typing.Union[list, bool] = attr.ib()\n\n perpendicular_bar_func: typing.Callable = attr.ib()\n\n def __attrs_post_init__(self):\n \"\"\" Properties to be determined after initialization\n \"\"\"\n self.n_visible = self.n_visible or self.df.shape[1]\n\n if self.fixed_order is True:\n last_values = self.df.iloc[-1].sort_values(ascending=False)\n cols = last_values.iloc[: 
self.n_visible].index\n self.df = self.df[cols]\n elif isinstance(self.fixed_order, list):\n cols = self.fixed_order\n self.df = self.df[cols]\n\n super().__attrs_post_init__()\n\n if self.n_visible > 15:\n import warnings\n\n warnings.warn(\n \"Plotting too many bars may result in undesirable output, use `n_visible=15` to limit number of bars\"\n )\n\n self.validate_params()\n\n self.df_rank = self.calculate_ranks(self.orig_df)\n\n if self.fixed_order:\n\n n = self.df.shape[1] + 1\n m = self.df.shape[0]\n rank_row = np.arange(1, n)\n if (self.sort == \"desc\" and self.orientation == \"h\") or (\n self.sort == \"asc\" and self.orientation == \"v\"\n ):\n rank_row = rank_row[::-1]\n\n ranks_arr = np.repeat(rank_row.reshape(1, -1), m, axis=0)\n self.df_rank = pd.DataFrame(data=ranks_arr, columns=cols)\n\n self.orig_index = self.df.index.astype(\"str\")\n\n self.bar_colors = self.get_colors(self.cmap)\n\n self.ax.tick_params(labelsize=self.tick_label_size)\n\n def validate_params(self):\n \"\"\" Validate parameters provided to chart instance\n\n Raises:\n ValueError: If sort value is not provided (either 'asc' or 'desc')\n ValueError: Orientation must be 'h' (horizontal) or 'v' (vertical)\n \"\"\"\n super().validate_params()\n\n if self.sort not in (\"asc\", \"desc\"):\n raise ValueError('`sort` must be \"asc\" or \"desc\"')\n\n if self.orientation not in (\"h\", \"v\"):\n raise ValueError('`orientation` must be \"h\" or \"v\"')\n\n def get_colors(\n self, cmap: typing.Union[str, Colormap, typing.List[str]]\n ) -> np.array:\n \"\"\" Get array of colours from BaseChart.get_colors and shorten to number of bars\n\n Args:\n cmap (typing.Union[str, colors.Colormap, typing.List[str]]): Provide string of colormap name, colormap instance, single color instance or list of colors as supported by https://matplotlib.org/2.0.2/api/colors_api.html\n\n Returns:\n np.array: Numpy Array of colors as strings\n \"\"\"\n bar_colors = super().get_colors(cmap)\n\n # bar_colors is now a list\n n = len(bar_colors)\n if self.df.shape[1] > n:\n bar_colors = bar_colors * (self.df.shape[1] // n + 1)\n return np.array(bar_colors[: self.df.shape[1]])\n\n def get_label_position(self) -> typing.Tuple[float, float]:\n \"\"\" Get label position for period annotation\n\n Returns:\n typing.Tuple[float,float]: x,y of label\n \"\"\"\n if self.orientation == \"h\":\n x_label = 0.6\n y_label = 0.25 if self.sort == \"desc\" else 0.8\n else:\n x_label = 0.7 if self.sort == \"desc\" else 0.1\n y_label = 0.8\n return x_label, y_label\n\n def calculate_ranks(self, df: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Calculate expanded dataframe to match length of animation\n\n Returns:\n typing.Tuple[pd.DataFrame,pd.DataFrame]: df_values contains interpolated values, df_rank contains interpolated rank\n \"\"\"\n\n df_rank = df.rank(axis=1, method=\"first\", ascending=False).clip(\n upper=self.n_visible + 1\n )\n if (self.sort == \"desc\" and self.orientation == \"h\") or (\n self.sort == \"asc\" and self.orientation == \"v\"\n ):\n # This flips all rankings, eg if n_visible = 5 then score 1 in table becomes (6-1 = 5)\n df_rank = self.n_visible + 1 - df_rank\n\n df_rank = self.get_interpolated_df(\n df_rank, self.steps_per_period, self.interpolate_period\n )\n # new_index = range(df.index.max() + 1)\n # df_rank = df_rank.reindex(new_index).interpolate()\n return df_rank\n\n def create_figure(self) -> typing.Tuple[plt.figure, plt.axes]:\n \"\"\" Create Bar chart figure\n\n Returns:\n typing.Tuple[plt.figure,plt.axes]: Figure & axes instance\n 
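Note:\n            The axes rectangle comes from calculate_new_figsize so tick and bar labels fit on the canvas; with fixed_max the value axis is frozen near 1.17x the data maximum.\n        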
\"\"\"\n fig = plt.Figure(figsize=self.figsize, dpi=self.dpi)\n limit = (0.2, self.n_visible + 0.8)\n rect = self.calculate_new_figsize(fig)\n ax = fig.add_axes(rect)\n if self.orientation == \"h\":\n ax.set_ylim(limit)\n if self.fixed_max:\n ax.set_xlim(0, self.df.values.max().max() * 1.05 * 1.11)\n ax.grid(True, axis=\"x\", color=\"white\")\n ax.xaxis.set_major_formatter(ticker.StrMethodFormatter(\"{x:,.0f}\"))\n else:\n ax.set_xlim(limit)\n if self.fixed_max:\n ax.set_ylim(0, self.df.values.max().max() * 1.05 * 1.11)\n ax.grid(True, axis=\"y\", color=\"white\")\n ax.set_xticklabels(ax.get_xticklabels(), ha=\"right\", rotation=30)\n ax.yaxis.set_major_formatter(ticker.StrMethodFormatter(\"{x:,.0f}\"))\n\n ax.set_axisbelow(True)\n ax.tick_params(length=0, labelsize=self.tick_label_size, pad=2)\n ax.set_facecolor(\".9\")\n ax.set_title(self.title)\n for spine in ax.spines.values():\n spine.set_visible(False)\n return fig, ax\n\n def calculate_new_figsize(self, real_fig: plt.figure) -> typing.List[float]:\n \"\"\" Calculate figure size to allow for labels, etc\n\n Args:\n real_fig (plt.figure): Figure before calculation\n\n Returns:\n typing.List[float]: The dimensions [left, bottom, width, height] of the new axes. All quantities are in fractions of figure width and height.\n \"\"\"\n import io\n\n # df_values = self.prepare_data()\n fig = plt.Figure(figsize=self.figsize)\n # if self.title:\n # fig.tight_layout(rect=[0, 0, 1, 0.9]) # To include title\n ax = fig.add_subplot()\n fake_cols = [chr(i + 70) for i in range(self.df.shape[1])]\n\n max_val = self.df.max().max()\n if self.orientation == \"h\":\n ax.barh(fake_cols, [1] * self.df.shape[1])\n self.extracted_from_calculate_new_figsize_15(ax, 0, \"y\", fig, io)\n orig_pos = ax.get_position()\n ax.set_yticklabels(self.df.columns)\n ax.set_xticklabels([max_val] * len(ax.get_xticks()))\n else:\n ax.bar(fake_cols, [1] * self.df.shape[1])\n self.extracted_from_calculate_new_figsize_15(ax, 30, \"x\", fig, io)\n orig_pos = ax.get_position()\n ax.set_xticklabels(self.df.columns, ha=\"right\")\n ax.set_yticklabels([max_val] * len(ax.get_yticks()))\n\n fig.canvas.print_figure(io.BytesIO(), format=\"png\")\n new_pos = ax.get_position()\n\n coordx, prev_coordx = new_pos.x0, orig_pos.x0\n coordy, prev_coordy = new_pos.y0, orig_pos.y0\n old_w, old_h = self.figsize\n\n # if coordx > prev_coordx or coordy > prev_coordy:\n prev_w_inches = prev_coordx * old_w\n total_w_inches = coordx * old_w\n extra_w_inches = total_w_inches - prev_w_inches\n new_w_inches = extra_w_inches + old_w\n\n prev_h_inches = prev_coordy * old_h\n total_h_inches = coordy * old_h\n extra_h_inches = total_h_inches - prev_h_inches\n new_h_inches = extra_h_inches + old_h\n\n real_fig.set_size_inches(new_w_inches, new_h_inches)\n left = total_w_inches / new_w_inches\n bottom = total_h_inches / new_h_inches\n width = orig_pos.x1 - left\n height = orig_pos.y1 - bottom\n return [left, bottom, width, height]\n\n def extracted_from_calculate_new_figsize_15(self, ax, labelrotation, axis, fig, io):\n ax.tick_params(\n labelrotation=labelrotation, axis=axis, labelsize=self.tick_label_size\n )\n\n ax.set_title(self.title)\n fig.canvas.print_figure(io.BytesIO())\n\n def plot_bars(self, i: int) -> None:\n \"\"\" Plot bars in bar chart race on axes\n\n Args:\n i (int): index of current frame in animation\n \"\"\"\n bar_location = self.df_rank.iloc[i].values\n\n bar_location[np.isnan(bar_location)] = 0\n\n top_filt = (bar_location > 0) & (bar_location < self.n_visible + 1)\n bar_location = 
bar_location[top_filt]\n\n bar_length = self.df.iloc[i].values[top_filt]\n cols = self.df.columns[top_filt]\n colors = self.bar_colors[top_filt]\n\n if self.orientation == \"h\":\n self.ax.barh(\n bar_location,\n bar_length,\n ec=\"white\",\n tick_label=cols,\n color=colors,\n # **self.kwargs,\n )\n if not self.fixed_max:\n self.ax.set_xlim(self.ax.get_xlim()[0], bar_length.max() * 1.1)\n else:\n self.ax.bar(\n bar_location,\n bar_length,\n ec=\"white\",\n tick_label=cols,\n color=colors,\n **self.kwargs,\n )\n if not self.fixed_max:\n self.ax.set_ylim(self.ax.get_ylim()[0], bar_length.max() * 1.16)\n\n super().show_period(i)\n\n if self.label_bars:\n for text in self.ax.texts[int(bool(self.period_label)) :]:\n text.remove()\n if self.orientation == \"h\":\n zipped = zip(bar_length, bar_location)\n else:\n zipped = zip(bar_location, bar_length)\n\n for x1, y1 in zipped:\n xtext, ytext = self.ax.transLimits.transform((x1, y1))\n if self.orientation == \"h\":\n xtext += 0.01\n text = f\"{x1:,.0f}\"\n rotation = 0\n ha = \"left\"\n va = \"center\"\n else:\n ytext += 0.015\n text = f\"{y1:,.0f}\"\n rotation = 90\n ha = \"center\"\n va = \"bottom\"\n xtext, ytext = self.ax.transLimits.inverted().transform((xtext, ytext))\n self.ax.text(\n xtext,\n ytext,\n text,\n ha=ha,\n rotation=rotation,\n fontsize=self.bar_label_size,\n va=va,\n )\n\n if self.perpendicular_bar_func:\n if isinstance(self.perpendicular_bar_func, str):\n val = pd.Series(bar_length).agg(self.perpendicular_bar_func)\n else:\n values = self.df.iloc[i]\n ranks = self.df_rank.iloc[i]\n val = self.perpendicular_bar_func(values, ranks)\n\n if not self.ax.lines:\n if self.orientation == \"h\":\n self.ax.axvline(val, lw=8, color=\".5\", zorder=0.5)\n else:\n self.ax.axhline(val, lw=8, color=\".5\", zorder=0.5)\n else:\n line = self.ax.lines[0]\n if self.orientation == \"h\":\n line.set_xdata([val] * 2)\n else:\n line.set_ydata([val] * 2)\n\n def anim_func(self, i: int) -> None:\n \"\"\" Animation function, removes all bars and updates legend/period annotation.\n\n Args:\n i (int): Frame index for animation\n \"\"\"\n if self.enable_progress_bar:\n self.update_progress_bar()\n for bar in self.ax.containers:\n bar.remove()\n self.plot_bars(i)\n if self.period_fmt:\n self.show_period(i)\n\n def init_func(self):\n \"\"\" Initialization function for animation\n \"\"\"\n self.plot_bars(0)\n\n\n@attr.s\nclass ScatterChart(_BaseChart):\n \"\"\"\n ScatterChart to be generate animated plot with `matplotlib.pyplot.axes.scatter`\n\n Accepts kwargs as detailed on https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.scatter.html\n\n Args:\n _BaseChart : BaseChart constructor that all charts share\n\n Raises:\n ValueError: Size label must be a column in DataFrame\n \"\"\"\n\n size: typing.Union[int, str] = attr.ib()\n add_legend: bool = attr.ib()\n\n def __attrs_post_init__(self):\n \"\"\" Properties to be determined after initialization\n \"\"\"\n super().__attrs_post_init__()\n self.colors = self.get_colors(self.cmap)\n self._points: typing.Dict = {}\n for name in self.data_cols:\n self._points[name] = {\"x\": [], \"y\": [], \"size\": []}\n if isinstance(self.size, str) and self.size not in self.data_cols:\n raise ValueError(\n f\"Size provided as string: {self.size}, not present in dataframe columns\"\n )\n\n def plot_point(self, i: int) -> None:\n \"\"\"\n Plot points for scatter on chart\n\n\n Args:\n i (int): Frame to be plotted, will take slice of DataFrame at this index\n\n Raises:\n ValueError: Size label must be a column in 
DataFrame\n \"\"\"\n if not self.fixed_max:\n super().set_x_y_limits(self.df, i, self.ax)\n # If fixed_max is true then run it once to improve performance\n elif i == 0:\n super().set_x_y_limits(self.df, i, self.ax)\n j = 0\n for name, color in zip(self.data_cols, self.colors):\n self._points[name][\"x\"] = self.df[name].index[: i + 1]\n self._points[name][\"y\"] = self.df[name].iloc[: i + 1]\n if isinstance(self.size, str) and self.size in self.data_cols:\n self._points[name][\"size\"] = abs(self.df[self.size].iloc[: i + 1])\n else:\n self._points[name][\"size\"] = np.full((i + 1), self.size)\n if i == 0:\n self.sc = self.ax.scatter(\n self._points[name][\"x\"],\n self._points[name][\"y\"],\n s=self._points[name][\"size\"],\n color=color,\n label=name,\n edgecolors=\"none\",\n **self.kwargs,\n )\n if self.add_legend:\n handles, labels = self.ax.get_legend_handles_labels()\n legend = self.ax.legend(handles[:], labels[:], fontsize=\"x-small\")\n for handle in legend.legendHandles:\n handle.set_sizes([15])\n else:\n # update all points\n self.ax.collections[j].set_color(color)\n if isinstance(self.df.index, pd.DatetimeIndex):\n # date_array = np.c_[mdates.date2num(self._points[name][\"x\"]), self._points[name][\"y\"]]\n self.ax.collections[j].set_offsets(\n np.c_[\n mdates.date2num(self._points[name][\"x\"]),\n self._points[name][\"y\"],\n ]\n )\n else:\n self.ax.collections[j].set_offsets(\n np.c_[self._points[name][\"x\"], self._points[name][\"y\"]]\n )\n self.ax.collections[j].set_sizes(self._points[name][\"size\"])\n j += 1\n\n def anim_func(self, i: int) -> None:\n \"\"\" Animation function, plots all scatter points and updates legend/period annotation.\n\n Args:\n i (int): Index of frame of animation\n \"\"\"\n if self.enable_progress_bar:\n self.update_progress_bar()\n self.plot_point(i)\n if self.period_fmt:\n self.show_period(i)\n\n def init_func(self) -> None:\n \"\"\" Initialization function for animation\n \"\"\"\n self.ax.scatter([], [])\n\n\n@attr.s\nclass LineChart(_BaseChart):\n \"\"\" Animated Line Chart implementation\n\n Args:\n BaseChart (BaseChart): Shared Base Chart class inherit to all charts\n\n Returns:\n LineChart: Animated Line Chart class for use with multiple plots or save\n \"\"\"\n\n line_width: int = attr.ib()\n label_events: typing.Dict[str, str] = attr.ib()\n fill_under_line_color: str = attr.ib()\n add_legend: bool = attr.ib()\n\n def __attrs_post_init__(self):\n \"\"\" Properties to be determined after initialization\n \"\"\"\n super().__attrs_post_init__()\n self.line_colors = self.get_colors(self.cmap)\n self._lines: typing.Dict = {}\n for name in self.data_cols:\n self._lines[name] = {\"x\": [], \"y\": []}\n\n def plot_line(self, i: int) -> None:\n \"\"\" Function for plotting all lines in dataframe\n\n Args:\n i (int): Index of frame for animation\n \"\"\"\n # TODO Somehow implement n visible lines?\n if not self.fixed_max:\n super().set_x_y_limits(self.df, i, self.ax)\n # If fixed_max is true then run it once to improve performance\n elif i == 0:\n super().set_x_y_limits(self.df, i, self.ax)\n j = 0\n # fills = [\"\"]\n for name, color in zip(self.data_cols, self.line_colors):\n self._lines[name][\"x\"] = self.df[name].index[: i + 1]\n self._lines[name][\"y\"] = self.df[name].iloc[: i + 1]\n if i == 0:\n self.ax.plot(\n self._lines[name][\"x\"],\n self._lines[name][\"y\"],\n self.line_width,\n color=color,\n label=name,\n **self.kwargs,\n )\n if self.add_legend:\n handles, labels = self.ax.get_legend_handles_labels()\n self.ax.legend(handles[::2], 
labels[::2], fontsize=\"x-small\")\n # if self.fill_under_line_color:\n # self.ax.fill_between(\n # self._lines[name][\"x\"],\n # self._lines[name][\"y\"],\n # color=self.get_single_color(self.fill_under_line_color),\n # alpha=0.5,\n # )\n # fills = self.ax.collections[-1]\n else:\n # update all lines\n self.ax.lines[j].set_color(color)\n self.ax.lines[j].set_data(\n self._lines[name][\"x\"], self._lines[name][\"y\"]\n )\n j += 1\n if self.fill_under_line_color:\n # Fills need to be removed and re-generated, or else `matplotlib`\n # adds a new one per frame, performance degrades and alpha doesn't show properly.\n if i == 0:\n self.ax.fill_between(\n self._lines[name][\"x\"],\n self._lines[name][\"y\"],\n color=self.get_single_color(self.fill_under_line_color),\n alpha=0.5,\n )\n self.fills = self.ax.collections[-1]\n else:\n self.fills.remove()\n self.ax.fill_between(\n self._lines[name][\"x\"],\n self._lines[name][\"y\"],\n color=self.get_single_color(self.fill_under_line_color),\n alpha=0.5,\n )\n self.fills = self.ax.collections[-1]\n\n # Set label_events once, it improves loop performance by x 4.\n if self.label_events and i == 0:\n # from datetime import datetime\n # import numpy as np\n\n for pos, (label, date) in enumerate(self.label_events.items()):\n event_index = (self.df.index <= date).sum()\n # if i >= event_index:\n event_start = self.df.index[event_index]\n trans = transforms.blended_transform_factory(\n self.ax.transData, self.ax.transAxes\n )\n\n self.ax.axvline(event_start, lw=8, color=\".5\", zorder=0.5)\n self.ax.text(\n event_start,\n 0.9 - (pos * 0.1),\n label,\n transform=trans,\n fontsize=\"x-small\",\n )\n\n def anim_func(self, i: int) -> None:\n \"\"\" Animation function, updates all lines and legend/period annotation.\n\n Args:\n i (int): Index of frame of animation\n \"\"\"\n if self.enable_progress_bar:\n self.update_progress_bar()\n self.plot_line(i)\n if self.period_fmt:\n self.show_period(i)\n\n def init_func(self) -> None:\n \"\"\" Initialization function for animation\n \"\"\"\n self.ax.plot([], [])\n\n\n@attr.s\nclass PieChart(_BaseChart):\n \"\"\" Animated Pie Chart implementation\n\n Args:\n BaseChart (BaseChart): Shared Base Chart class inherit to all charts\n\n Returns:\n PieChart: Animated Pie Chart class for use with multiple plots or save\n \"\"\"\n\n def __attrs_post_init__(self):\n \"\"\" Properties to be determined after initialization\n \"\"\"\n super().__attrs_post_init__()\n self.wedge_colors = self.get_colors(self.cmap)\n\n self.wedge_colors = dict(zip(self.data_cols, self.wedge_colors))\n\n self._wedges: typing.Dict = {}\n for name in self.data_cols:\n self._wedges[name] = {\"size\": []}\n\n def plot_wedge(self, i: int) -> None:\n \"\"\" Function for plotting all lines in dataframe\n\n Args:\n i (int): Index of frame for animation\n \"\"\"\n\n for text in self.ax.texts[int(bool(self.period_fmt)) :]:\n text.remove()\n\n # super().set_x_y_limits(self.df, i)\n # print(self.df[self.data_cols].notnull())\n filt_nan = self.df[self.data_cols].iloc[i].notnull()\n\n # print(self.df[self.data_cols].iloc[i][filt_nan])\n\n wedges = self.df[self.data_cols].iloc[i][filt_nan]\n\n wedge_color_list = []\n for label in wedges.index:\n wedge_color_list.append(self.wedge_colors[label])\n\n self.ax.pie(\n wedges.values, labels=wedges.index, colors=wedge_color_list, **self.kwargs\n )\n\n # for name, color in zip(self.data_cols, self.wedge_colors):\n\n # self._wedges[name][\"size\"].append(self.df[name].index[i])\n # # 
self._lines[name][\"y\"].append(self.df[name].iloc[i])\n # self.ax.pie(\n # self._wedges[name][\"size\"],\n # label=name,\n # color=color,\n # **self.kwargs,\n # )\n\n def anim_func(self, i: int) -> None:\n \"\"\" Animation function, removes all wedges and updates legend/period annotation.\n\n Args:\n i (int): Index of frame of animation\n \"\"\"\n if self.enable_progress_bar:\n self.update_progress_bar()\n for wedge in self.ax.patches:\n wedge.remove()\n self.plot_wedge(i)\n if self.period_fmt:\n self.show_period(i)\n\n def init_func(self) -> None:\n \"\"\" Initialization function for animation\n \"\"\"\n self.ax.pie([])\n\n\n@attr.s\nclass BarChart(_BaseChart):\n \"\"\" Animated Bar Chart implementation\n\n Args:\n BaseChart (BaseChart): Shared Base Chart class inherit to all charts\n\n Returns:\n BarChart: Animated Bar Chart class for use with multiple plots or save\n \"\"\"\n\n def __attrs_post_init__(self):\n \"\"\" Properties to be determined after initialization\n \"\"\"\n\n super().__attrs_post_init__()\n self.bar_colors = self.get_colors(self.cmap)\n\n self._bars: typing.Dict = {}\n for name in self.data_cols:\n self._bars[name] = {\"x\": [], \"y\": []}\n\n def plot_bars(self, i: int) -> None:\n \"\"\" Function for plotting all lines in dataframe\n\n Args:\n i (int): Index of frame for animation\n \"\"\"\n if not self.fixed_max:\n super().set_x_y_limits(self.df, i, self.ax)\n self.ax.set_ylim(\n self.df.iloc[: i + 1].values.min(),\n self.df.iloc[: i + 1].values.max() + 1e-6,\n )\n # If fixed_max is true then run it once to improve performance\n elif i == 0:\n super().set_x_y_limits(self.df, i, self.ax)\n # bars are flat at the bottom/top, so no need to apply a tolerance like\n # with line/scatter charts.\n self.ax.set_ylim(self.df.values.min(), self.df.values.max())\n\n for name, color in zip(self.data_cols, self.bar_colors):\n self._bars[name][\"x\"].append(self.df[name].index[i])\n self._bars[name][\"y\"].append(self.df[name].iloc[i])\n self.ax.bar(\n self._bars[name][\"x\"],\n self._bars[name][\"y\"],\n # self.line_width,\n color=color,\n **self.kwargs,\n )\n\n def anim_func(self, i: int) -> None:\n \"\"\" Animation function, removes all bars and updates legend/period annotation.\n\n Args:\n i (int): Index of frame of animation\n \"\"\"\n if self.enable_progress_bar:\n self.update_progress_bar()\n for bar in self.ax.containers:\n bar.remove()\n self.plot_bars(i)\n if self.period_fmt:\n self.show_period(i)\n\n def init_func(self) -> None:\n \"\"\" Initialization function for animation\n \"\"\"\n self.ax.bar([], [])\n\n\n@attr.s\nclass BubbleChart(_BaseChart):\n \"\"\"\n Multivariate Bubble charts from MultiIndex\n\n Generate animated bubble charts with multivariate data (x,y at a minimum must be supplied)\n Optionally supply data for colour and/or size\n\n Args:\n _BaseChart ([type]): Base chart for all chart classes\n\n Raises:\n ValueError: [description]\n \"\"\"\n\n x_data_label: str = attr.ib()\n y_data_label: str = attr.ib()\n size_data_label: typing.Union[int, float, str] = attr.ib()\n color_data_label: str = attr.ib()\n vmin: typing.Union[int, float] = attr.ib()\n vmax: typing.Union[int, float] = attr.ib()\n\n def __attrs_post_init__(self):\n \"\"\" Properties to be determined after initialization\n \"\"\"\n super().__attrs_post_init__()\n self.colors = self.get_colors(self.cmap)\n # Typically bubble plots are fixed scales on X & Y. Having varying\n # limits will look odd in most cases. 
So force to True.\n # self.fixed_max = True\n self._points: typing.Dict = {}\n self.column_keys = self.df.columns.get_level_values(level=0).unique().tolist()\n # self.data_cols = self.df.columns.get_level_values(level=1).unique().tolist()\n self.mapping = {\"x\": self.x_data_label, \"y\": self.y_data_label}\n if isinstance(self.size_data_label, str):\n self.mapping[\"size\"] = self.size_data_label\n if (\n isinstance(self.color_data_label, str)\n and self.color_data_label in self.column_keys\n ):\n self.mapping[\"color\"] = self.color_data_label\n # setting up colorbar axes & limits when `color` is a pd column\n self.color_bar = True\n if self.cmap == \"dark24\": # TODO: register \"dark24\" as a Colormap\n self.cmap = \"jet\"\n if self.vmin is None:\n self.vmin = np.floor(self.df[self.mapping[\"color\"]].values.min())\n if self.vmax is None:\n self.vmax = np.ceil(self.df[self.mapping[\"color\"]].values.max())\n else:\n self.color_bar = False\n if self.x_data_label is None or self.y_data_label is None:\n raise ValueError(\"X Y labels must be provided at a minimum\")\n if not (\n self.x_data_label in self.column_keys\n and self.y_data_label in self.column_keys\n ):\n raise ValueError(\n f\"Provided keys must be in level 0 multi index, possible values: {self.column_keys}\"\n )\n if self.fixed_max:\n # scale to allow canvas to attempt covering for bubble size when near min/max axes values\n ax_xscale = (\n self.df[self.mapping[\"x\"]].values.max()\n - self.df[self.mapping[\"x\"]].values.min()\n ) * 0.05\n ax_yscale = (\n self.df[self.mapping[\"y\"]].values.max()\n - self.df[self.mapping[\"y\"]].values.min()\n ) * 0.05\n BBox = (\n self.df[self.mapping[\"x\"]].values.min() - ax_xscale,\n self.df[self.mapping[\"x\"]].values.max() + ax_xscale,\n self.df[self.mapping[\"y\"]].values.min() - ax_yscale,\n self.df[self.mapping[\"y\"]].values.max() + ax_yscale,\n )\n self.ax.set_xlim(BBox[0], BBox[1])\n self.ax.set_ylim(BBox[2], BBox[3])\n\n # TODO Add geopandas for map plots\n # self.ax = self.show_image(\n # self.ax,\n # \"C:\\\\Users\\\\jackm\\\\Documents\\\\GitHub\\\\pandas-alive\\\\data\\\\nsw_map.png\",\n # extent=BBox,\n # zorder=0,\n # aspect=\"equal\",\n # )\n\n def plot_point(self, i: int) -> None:\n \"\"\"\n Plot points from MultiIndexed DataFrame\n\n Optionally size & colour can be provided and if so, the string provided must be present in the level 0 column labels\n\n Args:\n i (int): Frame to plot, will slice DataFrame at this index\n \"\"\"\n for output_key, column_key in self.mapping.items():\n self._points[output_key] = self.df[column_key].iloc[i]\n\n self.sc = self.ax.scatter(\n x=self._points[\"x\"],\n y=self._points[\"y\"],\n s=self._points[\"size\"]\n if isinstance(self.size_data_label, str)\n else self.size_data_label,\n c=self._points[\"color\"] if self.color_bar else self.color_data_label,\n cmap=self.cmap,\n alpha=0.8,\n **self.kwargs,\n )\n # setting up colorbar when color is a pd column and doesn't exist\n # already from a previous animation run with the same custom figure.\n if i == 0 and self.color_bar:\n self.cbar = self.fig.colorbar(self.sc)\n # this sets colorbar scales & settings to remain constant for all frames\n self.cbar.ax.tick_params(labelsize=\"small\")\n self.cbar.set_label(\n label=\"Size & Colour = \" + self.color_data_label, fontsize=\"x-small\"\n )\n if self.color_bar:\n # this is required for all iterations to update colour on bubbles\n self.sc.set_clim(self.vmin, self.vmax)\n\n def anim_func(self, i: int) -> None:\n \"\"\" Animation function, removes 
bubbles and updates legend/period annotation.\n\n Args:\n i (int): Index of frame of animation\n \"\"\"\n if self.enable_progress_bar:\n self.update_progress_bar()\n for path in self.ax.collections:\n path.remove()\n self.plot_point(i)\n if self.period_fmt:\n self.show_period(i)\n\n def init_func(self) -> None:\n \"\"\" Initialization function for animation\n \"\"\"\n self.ax.scatter([], [])\n","repo_name":"JackMcKew/pandas_alive","sub_path":"pandas_alive/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":32690,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"81"} +{"seq_id":"30793901008","text":"import json\nfrom pathlib import Path\nfrom .io import *\nfrom .genome import *\n\n\n_logger = logging.getLogger(\"cnvpytor.export\")\n\n\nclass Wiggle:\n def __init__(self, filename):\n \"\"\"\n creates bigwig file\n Parameters\n ----------\n filename : str\n Path for the bigwig filename\n \"\"\"\n self.filename = filename\n self.file = None\n import pyBigWig\n\n if not Path(filename).exists():\n try:\n self.file = pyBigWig.open(filename, 'w')\n except IOError as e:\n print(\"Unable to open file {}! Error: {}\".format(filename, e))\n except RuntimeError as e:\n print(\"Unable to open file {}! Error: {}\".format(filename, e))\n else:\n self.file = pyBigWig.open(filename)\n\n def add_header_list(self, chr_len_list):\n \"\"\"\n Add header to the bigwig file\n Parameters\n ----------\n chr_len_list : list of tuple\n chromosome name and length list.\n\n \"\"\"\n self.file.addHeader(chr_len_list)\n\n def add_fixedstep(self, chrom, position_int, value_list, span=1, step=1):\n \"\"\"\n Add fixed step formatted data\n Parameters\n ----------\n chrom : str\n chromosome name\n position_int : int\n start position\n value_list : list of values\n input values\n span : int\n step : int\n\n \"\"\"\n self.file.addEntries(chrom, position_int, values=value_list, span=span, step=step)\n\n def get_cnvpytor_signal(self, md5, chrom, bin_size, signal, flag):\n \"\"\"\n Get a signal from pytor file\n\n parameters\n -------------\n md5:\n chrom: str\n chromosome name\n bin_size: int\n bin size\n signal: str\n name of the cnvpytor signal\n flag: int\n Binary flag\n returns\n -------------\n signal_details : numpy.nparray\n Array contains data\n \"\"\"\n signal_details = md5.get_signal(chrom, bin_size, signal, flag)\n return signal_details\n\n def get_chrom_list(self, md5):\n \"\"\"\n Get list of chromosome name and its length\n\n parameters\n ------------\n md5: cnvpyto object\n\n returns\n ------------\n chr_len_list: list of tuples\n list contain tuples with chr name and length\n \"\"\"\n chr_len = md5.get_signal(None, None, \"chromosome lengths\")\n chr_len_list = list(zip(chr_len[::2].astype(str), chr_len[1::2].astype(int)))\n return chr_len_list\n\n def create_wig_offset_transform(self, md5, chr_list, bin_size, signal, flag, offset):\n \"\"\"\n parameters\n -------------\n md5:\n chr_list: list\n list of chromosomes\n bin_size: int\n bin size\n signal: str\n name of the signal\n flag: int\n Binary flag\n offset:\n\n returns\n -------------\n \"\"\"\n # add chr_list to add wig header\n self.add_header_list(chr_list)\n\n # add the data\n for (chrom, length) in chr_list:\n signal_details = md5.get_signal(chrom, bin_size, signal, flag)\n if isinstance(signal_details, np.ndarray):\n signal_value_list = signal_details[()]\n signal_value_list[signal_value_list != 0] += offset\n\n signal_value_list = np.absolute(signal_value_list)\n self.add_fixedstep(chrom, 
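# A minimal sketch of the pyBigWig calls this Wiggle wrapper drives: a header of
# (chromosome, length) tuples must be written before any fixed-step values. The
# file name and values below are placeholders.
import pyBigWig

bw = pyBigWig.open("example.bw", "w")
bw.addHeader([("chr1", 1000)])
# one float per bin, starting at base 0, each bin spanning `span` bases
bw.addEntries("chr1", 0, values=[0.5, 1.5, 2.0], span=100, step=100)
bw.close()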
0, signal_value_list, span=bin_size, step=bin_size)\n\n def create_wig(self, md5, chr_list, bin_size, signal, flag):\n \"\"\"\n parameters\n -------------\n md5:\n chr_list: list\n list of chromosome\n bin_size: int\n bin size\n signal: str\n signal name\n flag: int\n Binary flag\n\n\n \"\"\"\n # add chr_list to add wig header\n self.add_header_list(chr_list)\n\n # add the data\n for (chrom, length) in chr_list:\n signal_details = md5.get_signal(chrom, bin_size, signal, flag)\n if isinstance(signal_details, np.ndarray):\n signal_value_list = signal_details[()]\n self.add_fixedstep(chrom, 0, signal_value_list, span=bin_size, step=bin_size)\n\n def __del__(self):\n\n if self.file:\n self.file.close()\n\n\nclass ExportJBrowse:\n\n rd_signal_dct = {\n \"RD\": {\n \"FLAG\": [0, 0x0010],\n \"color\": [\"gray\", \"black\"]\n },\n \"RD partition\": {\n \"FLAG\": [0x0010],\n \"color\": [\"red\"]\n },\n \"RD call\": {\n \"FLAG\": [0x0010],\n \"color\": [\"green\"]\n }\n }\n snp_signal_dct = {\n \"SNP baf\": {\n \"FLAG\": [0x0100],\n \"color\": [\"gray\"],\n \"nonCont\": [True],\n },\n \"SNP i1\": {\n \"FLAG\": [0x0100, 0x0100],\n \"color\": [\"red\", \"red\"],\n \"nonCont\": [True, True],\n \"offset\": [0.5, -0.5]\n },\n }\n\n signal_dct = {\n \"RD\": \"his_rd_p_%(bin_size)d%(rd_flag)s\",\n \"RD partition\": \"his_rd_p_%(bin_size)d_partition%(rd_flag)s\",\n \"RD call\": \"his_rd_p_%(bin_size)d_partition%(rd_flag)s_merge\",\n \"SNP baf\": \"snp_baf_%(bin_size)d%(snp_flag)s\",\n \"SNP maf\": \"snp_maf_%(bin_size)d%(snp_flag)s\",\n \"SNP i1\": \"snp_i1_%(bin_size)d%(snp_flag)s\",\n \"SNP i1 partition\": \"snp_i1_%(bin_size)d%(snp_flag)s_partition\",\n\n }\n\n def __init__(self, files, dir_name):\n \"\"\"\n Exports CNVpytor data to a directory\n Parameters\n ----------\n files : str\n CNVpytor files path\n dir_name: str\n Export directory path\n \"\"\"\n self.files = files\n self.dir = Path(dir_name)\n self.io = [IO(f, ro=True) for f in files]\n self.export_dir = self.export_create_dir()\n\n @property\n def pytor_names(self):\n \"\"\"\n Get name list for for pytor files\n return\n -------\n name_list: list\n filename for the pytor files\n \"\"\"\n name_list = []\n for filename in self.files:\n name_list.append(Path(filename).resolve().stem)\n return name_list\n\n @property\n def export_directory(self):\n \"\"\"\n return export directory name\n return\n -------\n export directory path\n \"\"\"\n if self.dir.is_dir():\n if len(self.files) > 1:\n # for multiple input file\n default_name = self.dir.joinpath(\"cnvpytor_jbrowse_export\")\n\n else:\n # for single_input_file\n default_name = self.dir.joinpath(\"jbrowse_{}\".format(self.pytor_names[0]))\n\n if default_name.exists():\n tmp_name = default_name\n i = 1\n while default_name.exists():\n update_name = \"{}({})\".format(tmp_name.name, i)\n default_name = default_name.with_name(update_name)\n i = i+1\n return default_name\n else:\n if self.dir.parent.exists():\n return self.dir\n else:\n _logger.error(\"Error: incorrect export path: {}\".format(self.dir))\n exit(0)\n\n def export_create_dir(self):\n \"\"\"\n create export directory\n return\n -------\n main_dir: str\n export directory path\n \"\"\"\n main_dir = self.export_directory\n main_dir.mkdir(parents=True, exist_ok=True)\n _logger.info(\"CNVpytor data exporting for JBrowse view in {}\".format(main_dir))\n return main_dir\n\n @property\n def export_data_dir_list(self):\n \"\"\"\n create \"bw\" directory\n return\n ---------\n data_dir_list: list\n list of filenames\n \"\"\"\n data_dir = 
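# A short sketch of the offset transform applied above: every non-zero bin is
# shifted by `offset`, then absolute values keep the exported track non-negative.
import numpy as np

signal = np.array([0.0, 1.0, -2.0])
signal[signal != 0] += 0.5
signal = np.absolute(signal)  # -> [0. , 1.5, 1.5]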
self.export_dir.joinpath(\"bw\")\n data_dir.mkdir(parents=True, exist_ok=True)\n data_dir_list = []\n for root_name in self.pytor_names:\n root_data = data_dir.joinpath(root_name)\n root_data.mkdir(parents=True, exist_ok=True)\n data_dir_list.append(root_data)\n return data_dir_list\n\n @property\n def export_seq_dir(self):\n \"\"\"\n create \"seq\" directory\n return\n ---------\n seq_dir: path\n \"seq\" directory path\n\n \"\"\"\n seq_dir = self.export_dir.joinpath(\"seq\")\n seq_dir.mkdir(parents=True, exist_ok=True)\n return seq_dir\n\n @property\n def export_tracklist_file(self):\n \"\"\"\n Get the path for trackList.json file\n return\n ---------\n track_list: path\n path for trackList.json file\n \"\"\"\n track_list = self.export_dir.joinpath(\"trackList.json\")\n return track_list\n\n @property\n def export_ref_file(self):\n \"\"\"\n Get the path for refSeqs.json file\n return\n ---------\n ref_file : path\n path for refSeqs.json file\n \"\"\"\n ref_file = self.export_seq_dir.joinpath(\"refSeqs.json\")\n return ref_file\n\n def signal_name(self, bin_size, signal, flags=0):\n \"\"\"\n Construct the signal name stored in the pytor file from bin_size, signal name and data flag\n\n parameter\n ------------\n :param bin_size: int\n :param signal: string\n :param flags: Flag\n\n return\n ------------\n signal name string, or None if the signal is unknown\n\n \"\"\"\n if signal in self.signal_dct:\n try:\n return self.signal_dct[signal] % {\"bin_size\": bin_size, \"rd_flag\": Signals().suffix_rd_flag(flags),\n \"snp_flag\": Signals().suffix_snp_flag(flags),\n \"flag\": Signals().suffix_flag(flags)}\n except TypeError:\n return None\n else:\n return None\n\n def rd_chr_bin(self, root_io):\n \"\"\"\n Read 'RD' data from pytor file\n parameter\n -----------\n root_io: io object\n cnvpytor io object\n return\n -----------\n chrs: dict\n bin sizes available for each chromosome\n bss: list\n all bin sizes found\n \"\"\"\n chr_bs = root_io.chromosomes_bin_sizes_with_signal(\"RD\")\n chrs = {}\n bss = []\n for c, b in chr_bs:\n if c not in chrs:\n chrs[c] = []\n chrs[c].append(int(b))\n if int(b) not in bss:\n bss.append(int(b))\n return chrs, bss\n\n def snp_chr_bin(self, root_io):\n \"\"\"\n Read SNP likelihood information from the pytor file\n parameter\n -----------\n root_io: io object\n cnvpytor io object\n return\n -----------\n chrs: dict\n bin sizes available for each chromosome\n bss: list\n all bin sizes found\n \"\"\"\n chr_bs = root_io.chromosomes_bin_sizes_with_signal(\"SNP likelihood\", FLAG_USEMASK)\n chrs = {}\n bss = []\n for c, b in chr_bs:\n if c not in chrs:\n chrs[c] = []\n chrs[c].append(int(b))\n if int(b) not in bss:\n bss.append(int(b))\n return chrs, bss\n\n @staticmethod\n def create_bigwig(root_io, bigwig_file, chr_list, bin_size, signal_name, flag, offset=None):\n \"\"\"\n Creates a bigwig file for the given signal\n\n parameter\n -----------\n root_io: io object\n cnvpytor io object\n bigwig_file: str\n Name of the bigwig file\n chr_list: list\n chromosome names\n bin_size: int\n bin size\n signal_name: string\n name of the signal\n flag: int\n Binary flag\n offset:\n\n return\n -----------\n \"\"\"\n wig = None\n for (chrom, length) in chr_list:\n signal_details = root_io.get_signal(chrom, bin_size, signal_name, flag)\n if isinstance(signal_details, np.ndarray):\n signal_value_list = signal_details[()]\n if offset is not None:\n signal_value_list[signal_value_list != 0] += offset\n signal_value_list = np.absolute(signal_value_list)\n\n if not isinstance(wig, Wiggle):\n wig = Wiggle(bigwig_file)\n wig.add_header_list(chr_list)\n\n wig.add_fixedstep(chrom, 0, 
signal_value_list, span=bin_size, step=bin_size)\n\n def rd_signal(self):\n \"\"\"\n Get read depth signal and write to bigwig file\n \"\"\"\n _logger.debug(\"Create Read depth related signals\")\n for root_index, root_io in enumerate(self.io):\n _logger.info(\"JBrowse export: RD related data for {}\".format(self.pytor_names[root_index]))\n rd_chr, rd_bin = self.rd_chr_bin(root_io)\n\n # get chr list\n chr_len = root_io.get_signal(None, None, \"chromosome lengths\")\n chr_list = list(zip(chr_len[::2].astype(str), chr_len[1::2].astype(int)))\n\n for signal_name, signal_dct in self.rd_signal_dct.items():\n _logger.info(\"JBrowse export: RD signal {}\".format(signal_name))\n for index, flag in enumerate(signal_dct['FLAG']):\n for bin_size in rd_bin:\n signal = self.signal_name(bin_size, signal_name, flag)\n bigwig_filename = \"{}.bw\".format(signal)\n bigwig_file = self.export_data_dir_list[root_index].joinpath(bigwig_filename)\n bigwig_file = str(bigwig_file)\n\n self.create_bigwig(root_io, bigwig_file, chr_list, bin_size, signal_name, flag)\n\n def snp_signal(self):\n \"\"\"\n Get signal and write to file\n\n \"\"\"\n _logger.debug(\"Create SNP related signals\")\n for root_index, root_io in enumerate(self.io):\n _logger.info(\"JBrowse export: SNP related data for {}\".format(self.pytor_names[root_index]))\n snp_chr, snp_bin = self.snp_chr_bin(root_io)\n\n # get chr list\n chr_len = root_io.get_signal(None, None, \"chromosome lengths\")\n chr_list = list(zip(chr_len[::2].astype(str), chr_len[1::2].astype(int)))\n\n for signal_name, signal_dct in self.snp_signal_dct.items():\n _logger.info(\"JBrowse export: SNP signal {}\".format(signal_name))\n for index, flag in enumerate(signal_dct['FLAG']):\n if \"offset\" in signal_dct:\n offset = signal_dct['offset'][index]\n for bin_size in snp_bin:\n signal = self.signal_name(bin_size, signal_name, flag)\n bigwig_filename = \"{}_offset{}.bw\".format(signal, offset)\n bigwig_file = self.export_data_dir_list[root_index].joinpath(bigwig_filename)\n bigwig_file = str(bigwig_file)\n\n self.create_bigwig(root_io, bigwig_file, chr_list, bin_size, signal_name, flag,\n offset=offset)\n\n else:\n for bin_size in snp_bin:\n signal = self.signal_name(bin_size, signal_name, flag)\n bigwig_filename = \"{}.bw\".format(signal)\n bigwig_file = self.export_data_dir_list[root_index].joinpath(bigwig_filename)\n bigwig_file = str(bigwig_file)\n\n self.create_bigwig(root_io, bigwig_file, chr_list, bin_size, signal_name, flag)\n\n @staticmethod\n def add_config_reference():\n track_dct = {'formatVersion': 1, \"plugins\": [\"MultiBigWig\", \"MultiScaleBigWig\"], 'tracks': []}\n track_dct['tracks'].append({\n \"category\": \"Reference sequence\",\n \"chunkSize\": 20000,\n \"key\": \"Reference sequence\",\n \"label\": \"DNA\",\n \"seqType\": \"dna\",\n \"storeClass\": \"JBrowse/Store/Sequence/StaticChunked\",\n \"type\": \"SequenceTrack\",\n \"urlTemplates\": \"seq/{refseq_dirpath}/{refseq}-\"\n })\n return track_dct\n\n def add_rd_config_track(self):\n \"\"\"\n Add read depth tracks\n returns\n -----------\n track_dct_list: dict\n read depth config setings\n \"\"\"\n _logger.debug(\"Get RD config track\")\n track_dct_list = []\n for root_index, root_io in enumerate(self.io):\n rd_chr, rd_bin = self.rd_chr_bin(root_io)\n url_template_dct = []\n for signal_name, signal_dct in self.rd_signal_dct.items():\n if 'FLAG' in signal_dct:\n for index, flag in enumerate(signal_dct['FLAG']):\n suffix_rd_flag = Signals.suffix_rd_flag(flag)\n signal_id = 
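# A hedged sketch of the old-style named templates used by signal_dct: each
# "%(name)s" placeholder is filled from a dict in one pass; values are made up.
template = "his_rd_p_%(bin_size)d%(rd_flag)s"
print(template % {"bin_size": 10000, "rd_flag": "_GC"})  # his_rd_p_10000_GC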
\"{}_{}{}\".format(self.pytor_names[root_index], signal_name, suffix_rd_flag)\n scales = {}\n for bin_size in rd_bin:\n signal = self.signal_name(bin_size, signal_name, flag)\n bigwig_filename = \"{}.bw\".format(signal)\n bigwig_file = self.export_data_dir_list[root_index].joinpath(bigwig_filename)\n bigwig_current_path = Path(bigwig_file.parent.parent.name).joinpath(bigwig_file.parent.name, bigwig_file.name).as_posix()\n if bigwig_file.exists():\n scales[bin_size] = bigwig_current_path\n\n if len(scales) > 0:\n url_template_dct.append({\n \"storeClass\": \"MultiScaleBigWig/Store/SeqFeature/MultiScaleBigWig\",\n \"scales\": scales,\n \"name\": signal_id,\n \"color\": signal_dct['color'][index],\n\n })\n if len(url_template_dct) > 0:\n\n track_dct = {\n \"category\": self.pytor_names[root_index],\n 'autoscale': 'local',\n \"storeClass\": \"MultiBigWig/Store/SeqFeature/MultiBigWig\",\n \"showTooltips\": True,\n \"showLabels\": True,\n \"clickTooltips\": True,\n \"key\": \"RD\",\n \"label\": \"RD {}\".format(self.pytor_names[root_index]),\n \"type\": \"MultiBigWig/View/Track/MultiWiggle/MultiXYPlot\",\n 'useStdDev': True,\n 'urlTemplates': url_template_dct\n\n }\n track_dct_list.append(track_dct)\n return track_dct_list\n\n def add_snp_config_track(self):\n \"\"\"\n Add snp track\n return\n ---------\n track_dct_list: dict\n settings for snp config\n \"\"\"\n _logger.debug(\"Get SNP config track info\")\n track_dct_list = []\n for root_index, root_io in enumerate(self.io):\n snp_url_dct_list = []\n snp_chr, snp_bin = self.snp_chr_bin(root_io)\n for signal_name, signal_dct in self.snp_signal_dct.items():\n for index, flag in enumerate(signal_dct['FLAG']):\n suffix_flag = Signals.suffix_snp_flag(flag)\n scales = {}\n if \"offset\" in signal_dct:\n offset = signal_dct['offset'][index]\n signal_id = \"{}{}{}\".format(signal_name, suffix_flag, offset)\n for bin_size in snp_bin:\n signal = self.signal_name(bin_size, signal_name, flag)\n bigwig_filename = \"{}_offset{}.bw\".format(signal, offset)\n bigwig_file = self.export_data_dir_list[root_index].joinpath(bigwig_filename)\n bigwig_current_path = Path(bigwig_file.parent.parent.name).joinpath(bigwig_file.parent.name, bigwig_file.name).as_posix()\n if bigwig_file.exists():\n scales[bin_size] = bigwig_current_path\n else:\n signal_id = \"{}{}\".format(signal_name, suffix_flag)\n for bin_size in snp_bin:\n signal = self.signal_name(bin_size, signal_name, flag)\n bigwig_filename = \"{}.bw\".format(signal)\n bigwig_file = self.export_data_dir_list[root_index].joinpath(bigwig_filename)\n bigwig_current_path = Path(bigwig_file.parent.parent.name).joinpath(bigwig_file.parent.name, bigwig_file.name).as_posix()\n if bigwig_file.exists():\n scales[bin_size] = bigwig_current_path\n if len(scales) > 0:\n snp_url_dct_list.append({\n \"storeClass\": \"MultiScaleBigWig/Store/SeqFeature/MultiScaleBigWig\",\n \"scales\": scales,\n \"name\": signal_id,\n \"color\": signal_dct['color'][index],\n \"nonCont\": signal_dct['nonCont'][index]\n })\n if len(snp_url_dct_list) > 0:\n track_dct = {\n \"category\": self.pytor_names[root_index],\n 'autoscale': 'local',\n \"storeClass\": \"MultiBigWig/Store/SeqFeature/MultiBigWig\",\n \"showTooltips\": True,\n \"showLabels\": True,\n \"clickTooltips\": True,\n \"max_score\": 1,\n \"key\": \"SNP\",\n \"label\": \"SNP {}\".format(self.pytor_names[root_index]),\n \"type\": \"MultiBigWig/View/Track/MultiWiggle/MultiXYPlot\",\n 'urlTemplates': snp_url_dct_list,\n }\n track_dct_list.append(track_dct)\n return track_dct_list\n\n def 
create_tracklist_json(self):\n \"\"\"\n create track list file\n\n return\n ----------\n track_dct: dict\n tracklist configuration\n \"\"\"\n _logger.debug(\"Creates config file: {}\".format(self.export_tracklist_file))\n\n # reference config\n track_dct = self.add_config_reference()\n\n # create rd config\n rd_track_list = self.add_rd_config_track()\n for rd_track in rd_track_list:\n track_dct['tracks'].append(rd_track)\n\n # create SNP config\n snp_track_list = self.add_snp_config_track()\n for snp_track in snp_track_list:\n track_dct['tracks'].append(snp_track)\n\n with open(self.export_tracklist_file, 'w') as f:\n json.dump(track_dct, f, indent=2)\n return track_dct\n\n def create_reference_json(self):\n _logger.debug(\"Exporting reference details\")\n # get signal details\n chr_len = list(np.array(self.io[0].get_signal(None, None, \"chromosome lengths\")).astype(\"str\"))\n chr_dct = dict(zip(chr_len[::2], chr_len[1::2]))\n\n # create signal list in proper format\n chr_dct_list = []\n for chr, length in chr_dct.items():\n tmp_dct = {\"end\": length, \"length\": length, \"name\": chr, \"start\": 0}\n chr_dct_list.append(tmp_dct)\n\n # save it to file\n with open(self.export_ref_file, 'w') as f:\n json.dump(chr_dct_list, f, indent=2)\n\n def __del__(self):\n\n _logger.info(\"JBrowse export: complete\")\n _logger.info(\"Copy this directory to jbrowse directory if export path is not set to JBrowse path, \"\n \"To access this via localhost: http://localhost/jbrowse/?data={}\"\n .format(self.export_directory.parent.name))\n","repo_name":"abyzovlab/CNVpytor","sub_path":"cnvpytor/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":24037,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"81"} +{"seq_id":"23640362395","text":"from sys import prefix\nimport flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nimport flask_wtf\nimport os\nimport auth\nimport cloudinary\nimport more_itertools as mit\n\nfrom api import req_lib\n\nos.environ[\"APP_SECRET_KEY\"] = \"asdfadfs\"\n\ncloudinary.config(\n cloud_name=\"dqv7e2cyi\",\n api_key=\"244334546783172\",\n api_secret=\"P-0gM5gXEWHk7UCcQr1xIav3pQg\",\n)\nimport cloudinary.uploader\nimport cloudinary.api\n\napp = flask.Flask(\n __name__, template_folder=\"src\", static_folder=\"static_files\"\n)\napp.secret_key = os.environ[\"APP_SECRET_KEY\"]\n\napp.config[\n \"SQLALCHEMY_DATABASE_URI\"\n] = \"postgresql+psycopg2://stwiezab:eN4T8unVzyIE49TzhKCbf1m5lKkGhjWU@peanut.db.elephantsql.com/stwiezab\"\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\nflask_wtf.csrf.CSRFProtect(app)\n\nfrom models import (\n ClubMembersModel,\n UsersModel,\n ClubsModel,\n JoinRequests,\n InviteRequests,\n CreationRequests,\n UndergraduatesModel,\n)\n\ntest = True\n\n\ndef checkValidUser():\n netid = auth.authenticate()\n user = db.session.get(UsersModel, netid)\n if not user:\n return flask.abort(flask.redirect(\"/profile-create\"))\n if user.first_time:\n user.first_time = False\n db.session.commit()\n return flask.abort(flask.redirect(\"/profile-update\"))\n if user.is_banned:\n return flask.abort(flask.redirect(\"/banned\"))\n return user\n\n\ndef checkValidAdmin():\n user = checkValidUser()\n if not user.is_admin:\n return flask.abort(flask.redirect(\"/index\"))\n return user\n\n\ndef checkValidClub(clubid):\n club = db.session.get(ClubsModel, clubid)\n if not club:\n return flask.abort(flask.redirect(\"/index\"))\n return club\n\n\ndef checkValidMember(user, club):\n 
clubmember = db.session.get(\n ClubMembersModel, (user.netid, club.clubid)\n )\n if not clubmember:\n return flask.abort(\n flask.redirect(\n \"/group-join-request?clubid=\" + str(club.clubid)\n )\n )\n return clubmember\n\n\ndef checkValidModerator(user, club):\n clubmember = checkValidMember(user, club)\n if not clubmember.is_moderator:\n return flask.abort(\n flask.redirect(\"/group-members?clubid=\" + str(club.clubid))\n )\n return clubmember\n\n\n## Index Route\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n if auth.loggedin() is not None:\n return flask.redirect(\"/index\")\n html_code = flask.render_template(\"landing.html\")\n response = flask.make_response(html_code)\n return response\n\n\n# landing page\n@app.route(\"/landing\", methods=[\"GET\"])\n@app.route(\"/index\", methods=[\"GET\"])\ndef landing():\n # Setup data model\n user = checkValidUser()\n search_string = flask.request.args.get(\"search\")\n\n # get the string that user searched and current page number\n page_number = flask.request.args.get(\"page\")\n if not page_number:\n page_number = 0\n\n if search_string == None:\n html_code = flask.render_template(\n \"index.html\", user=user, no_search=True\n )\n response = flask.make_response(html_code)\n return response\n else:\n lowercase = search_string.lower()\n users = (\n db.session.query(UsersModel)\n .filter(\n (UsersModel.netid.ilike(\"%\" + lowercase + \"%\"))\n | (UsersModel.display_name.ilike(\"%\" + lowercase + \"%\"))\n | (\n (\n UsersModel.first_name\n + \" \"\n + UsersModel.last_name\n ).ilike(\"%\" + lowercase + \"%\")\n )\n )\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.netid)\n .all()\n )\n # split users up into pages of 50. Each page is split into lists of 10.\n # Later on, allow user to select number of results per page.\n users_pages = list(mit.chunked(users, 10))\n\n results_length = len(users)\n if results_length > 10:\n users = users[:10]\n\n html_code = flask.render_template(\n \"index.html\",\n user=user,\n search_string=search_string,\n users=users,\n results_length=results_length,\n users_pages=users_pages,\n max_pages=len(users_pages),\n page_number=int(page_number),\n no_search=False,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/about\", methods=[\"GET\"])\ndef about():\n # Setup data model\n user = checkValidUser()\n html_code = flask.render_template(\"about.html\", user=user)\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/logout\", methods=[\"GET\"])\ndef logout():\n auth.logoutapp()\n\n\n## Profile Creation Route\n@app.route(\"/profile-create\", methods=[\"GET\"])\ndef profilecreation():\n netid = auth.authenticate()\n user = db.session.get(UsersModel, netid)\n if user is not None:\n return flask.redirect(\"/index\")\n # Only needs to render the form\n return flask.render_template(\"profile-create.html\")\n\n\n## Profile Update Route\n@app.route(\"/profile-update\", methods=[\"GET\"])\ndef profileupdate():\n user = checkValidUser()\n # Only needs to render the update form\n html_code = flask.render_template(\"profile-update.html\", user=user)\n response = flask.make_response(html_code)\n return response\n\n\n## Profile Posting Route (creation)\n@app.route(\"/profilepost\", methods=[\"POST\"])\ndef profilepost():\n # Get all important pieces of the form and turn them into\n # a data set\n ## ADD MORE AS NEEDED\n netid = auth.authenticate()\n first_name = flask.request.form[\"first_name\"]\n last_name = flask.request.form[\"last_name\"]\n 
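# A standalone sketch of the guard pattern behind the checkValid* helpers above:
# flask.abort() accepts a Response object, so a redirect can be raised from deep
# inside a helper instead of being returned by every route. Names are illustrative.
import flask

def require_login(session_user):
    if session_user is None:
        flask.abort(flask.redirect("/login"))  # raises and unwinds back to Flask
    return session_user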
display_name = first_name + \" \" + last_name\n phone = flask.request.form[\"phone\"]\n instagram = flask.request.form[\"instagram\"]\n snapchat = flask.request.form[\"snapchat\"]\n email = netid + \"@princeton.edu\"\n try:\n photo = cloudinary.uploader.upload(\n flask.request.files[\"photo\"], public_id=netid\n )[\"url\"]\n except:\n photo = cloudinary.api.resource(\n \"/Additional%20Files/default_user_icon\"\n )[\"url\"]\n is_admin = False\n is_banned = False\n new_user = UsersModel(\n netid,\n first_name,\n last_name,\n display_name,\n phone,\n instagram,\n snapchat,\n email,\n is_admin,\n is_banned,\n photo,\n False,\n )\n # Input the user into the DB\n db.session.add(new_user)\n db.session.commit()\n # Redirect to index for loading the user's new page\n return flask.redirect(\"/index\")\n\n\n## Profile Posting Route (update profile)\n@app.route(\"/profileupdatepost\", methods=[\"POST\"])\ndef profileput():\n # Get all important pieces of the form and change them in the user's info\n ## ADD MORE AS NEEDED\n netid = auth.authenticate()\n user = db.session.get(UsersModel, netid)\n user.first_name = flask.request.form[\"first_name\"]\n user.last_name = flask.request.form[\"last_name\"]\n user.display_name = (\n flask.request.form[\"first_name\"]\n + \" \"\n + flask.request.form[\"last_name\"]\n )\n user.phone = flask.request.form[\"phone\"]\n user.instagram = flask.request.form[\"instagram\"]\n user.snapchat = flask.request.form[\"snapchat\"]\n photo = flask.request.files[\"photo\"]\n if photo:\n user.photo = cloudinary.uploader.upload(photo, public_id=netid)[\n \"url\"\n ]\n\n ### DEPRECATED CODE: photo removal ###\n # photo = cloudinary.api.resource(netid)\n # cloudinary.uploader.destroy(photo)\n\n ### DEPRECATED CODE: replace photo with default ###\n # user.photo = cloudinary.api.resource(\n # \"/Additional%20Files/default_user_icon\"\n # )[\"url\"]\n # \"\"\"\n # if flask.request.files[\"photo\"] == \"delete\":\n # user.photo = cloudinary.api.resource(\n # \"/Additional%20Files/default_user_icon\"\n # )[\"url\"]\n # \"\"\"\n\n # Input the user into the DB\n db.session.add(user)\n db.session.commit()\n # Redirect to index for loading the user's new page\n return flask.redirect(\"/index\")\n\n\n## Group Creation Route\n@app.route(\"/group-create-request\", methods=[\"GET\"])\ndef groupcreation():\n user = checkValidUser()\n # Only needs to render the creation form\n return flask.render_template(\"group-create-request.html\", user=user)\n\n\n@app.route(\"/grouprequestpost\", methods=[\"POST\"])\ndef grouprequestpost():\n netid = auth.authenticate()\n name = flask.request.form[\"name\"]\n attributes = [\"share_socials\", \"share_phone\"]\n share_phone = flask.request.form.get(\"share_phone\") == \"on\"\n share_socials = flask.request.form.get(\"share_socials\") == \"on\"\n public = flask.request.form.get(\"public\") == \"on\"\n recent_request = (\n db.session.query(CreationRequests)\n .order_by(CreationRequests.reqid.desc())\n .first()\n )\n reqid = 0\n if recent_request is not None:\n reqid = recent_request.reqid + 1\n new_club_request = CreationRequests(\n reqid, name, netid, public, share_phone, share_socials\n )\n db.session.add(new_club_request)\n db.session.commit()\n # Redirect to index for loading the user's new page\n return flask.redirect(\"/index\")\n\n\n@app.route(\"/groups\", methods=[\"GET\"])\ndef groups():\n user = checkValidUser()\n clubs = (\n db.session.query(ClubsModel)\n .filter(ClubMembersModel.netid == user.netid)\n .filter(ClubsModel.clubid == 
ClubMembersModel.clubid)\n .order_by(ClubsModel.name)\n .all()\n )\n html_code = flask.render_template(\n \"groups.html\", user=user, clubs=clubs\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/group-results\", methods=[\"GET\"])\ndef groupresults():\n user = checkValidUser()\n search = flask.request.args.get(\"search\").lower()\n adminclubs = (\n db.session.query(ClubsModel)\n .filter(ClubMembersModel.netid == user.netid)\n .filter(ClubsModel.clubid == ClubMembersModel.clubid)\n .filter(ClubMembersModel.is_moderator == True)\n .filter(ClubsModel.name.ilike(\"%\" + search + \"%\"))\n .order_by(ClubsModel.name)\n .all()\n )\n nonadminclubs = (\n db.session.query(ClubsModel)\n .filter(ClubMembersModel.netid == user.netid)\n .filter(ClubsModel.clubid == ClubMembersModel.clubid)\n .filter(ClubMembersModel.is_moderator == False)\n .filter(ClubsModel.name.ilike(\"%\" + search + \"%\"))\n .order_by(ClubsModel.name)\n .all()\n )\n html_code = flask.render_template(\n \"group-results.html\",\n user=user,\n adminclubs=adminclubs,\n nonadminclubs=nonadminclubs,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/groups-search\", methods=[\"GET\"])\ndef groupssearch():\n user = checkValidUser()\n html_code = flask.render_template(\"groups-search.html\", user=user)\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/group-search-results\", methods=[\"GET\"])\ndef groupsearchresults():\n user = checkValidUser()\n search = flask.request.args.get(\"search\").lower()\n clubs = (\n db.session.query(ClubsModel)\n .filter(ClubsModel.name.ilike(\"%\" + search + \"%\"))\n .order_by(ClubsModel.name)\n .all()\n )\n html_code = flask.render_template(\n \"group-search-results.html\", user=user, clubs=clubs\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/group-members\", methods=[\"GET\"])\ndef groupmembers():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n clubmember = checkValidMember(user, club)\n adminmembers = (\n db.session.query(UsersModel)\n .filter(ClubMembersModel.clubid == clubid)\n .filter(UsersModel.netid == ClubMembersModel.netid)\n .filter(ClubMembersModel.is_moderator == True)\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.first_name)\n .all()\n )\n nonadminmembers = (\n db.session.query(UsersModel)\n .filter(ClubMembersModel.clubid == clubid)\n .filter(UsersModel.netid == ClubMembersModel.netid)\n .filter(ClubMembersModel.is_moderator == False)\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.first_name)\n .all()\n )\n\n html_code = flask.render_template(\n \"group-members.html\",\n user=user,\n adminmembers=adminmembers,\n nonadminmembers=nonadminmembers,\n clubid=clubid,\n clubmember=clubmember,\n name=club.name,\n is_public=club.public,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/toggle-visibility\", methods=[\"GET\", \"POST\"])\ndef togglevisibility():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n clubmember = checkValidMember(user, club)\n adminmembers = (\n db.session.query(UsersModel)\n .filter(ClubMembersModel.clubid == clubid)\n .filter(UsersModel.netid == ClubMembersModel.netid)\n .filter(ClubMembersModel.is_moderator == True)\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.first_name)\n .all()\n )\n nonadminmembers = (\n db.session.query(UsersModel)\n 
.filter(ClubMembersModel.clubid == clubid)\n .filter(UsersModel.netid == ClubMembersModel.netid)\n .filter(ClubMembersModel.is_moderator == False)\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.first_name)\n .all()\n )\n\n new_permissions = db.session.get(ClubsModel, clubid)\n new_permissions.public = not club.public\n\n # Input the user into the DB\n db.session.add(new_permissions)\n db.session.commit()\n html_code = flask.render_template(\n \"group-members.html\",\n user=user,\n adminmembers=adminmembers,\n nonadminmembers=nonadminmembers,\n clubid=clubid,\n clubmember=clubmember,\n name=club.name,\n is_public=club.public,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/group-requests\", methods=[\"GET\"])\ndef grouprequests():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n clubmember = checkValidModerator(user, club)\n students = (\n db.session.query(UsersModel)\n .filter(JoinRequests.clubid == clubid)\n .filter(UsersModel.netid == JoinRequests.netid)\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.first_name)\n .all()\n )\n html_code = flask.render_template(\n \"group-requests.html\",\n user=user,\n students=students,\n clubid=clubid,\n clubmember=clubmember,\n name=club.name,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/group-join-request\", methods=[\"GET\"])\ndef groupjoinrequest():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n member = db.session.get(ClubMembersModel, (user.netid, clubid))\n if member is not None:\n return flask.redirect(\"/group-members?clubid=\" + clubid)\n html_code = flask.render_template(\n \"group-join-request.html\", user=user, club=club\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/groupjoinpost\", methods=[\"POST\"])\ndef groupjoinpost():\n netid = auth.authenticate()\n clubid = flask.request.args.get(\"clubid\")\n active_request = db.session.get(JoinRequests, (netid, clubid))\n if active_request is not None:\n return flask.redirect(\"/index\")\n request_exists = db.session.get(InviteRequests, (netid, clubid))\n if request_exists is not None:\n return flask.redirect(\"/my-invites\")\n request = JoinRequests(netid, clubid)\n db.session.add(request)\n db.session.commit()\n # Redirect to index for loading the user's new page\n return flask.redirect(\"/index\")\n\n\n@app.route(\"/groupjoinfulfill\", methods=[\"POST\"])\ndef groupjoinfulfill():\n clubid = flask.request.args.get(\"clubid\")\n join_netid = flask.request.args.get(\"join_netid\")\n accept = flask.request.args.get(\"accept\")\n join_request = db.session.get(JoinRequests, (join_netid, clubid))\n db.session.delete(join_request)\n db.session.commit()\n if accept == \"1\":\n new_club_member = ClubMembersModel(clubid, join_netid, False)\n db.session.add(new_club_member)\n db.session.commit()\n return flask.redirect(\"/group-requests?clubid=\" + clubid)\n\n\n@app.route(\"/group-leave\", methods=[\"GET\"])\ndef groupleave():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n member = checkValidMember(user, club)\n html_code = flask.render_template(\n \"group-leave.html\", user=user, club=club\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/groupleavepost\", methods=[\"POST\"])\ndef groupleavepost():\n netid = auth.authenticate()\n clubid = 
flask.request.args.get(\"clubid\")\n member = db.session.get(ClubMembersModel, (netid, clubid))\n db.session.delete(member)\n db.session.commit()\n # Redirect to index for loading the user's new page\n return flask.redirect(\"/groups\")\n\n\n@app.route(\"/group-remove-member\", methods=[\"GET\"])\ndef groupremovemember():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n member = checkValidModerator(user, club)\n members = (\n db.session.query(ClubMembersModel.is_moderator, UsersModel)\n .filter(ClubMembersModel.clubid == clubid)\n .filter(UsersModel.netid == ClubMembersModel.netid)\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.first_name)\n .all()\n )\n html_code = flask.render_template(\n \"group-remove-member.html\",\n user=user,\n members=members,\n club=club,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/removemember\", methods=[\"POST\"])\ndef removemember():\n clubid = flask.request.args.get(\"clubid\")\n member_netid = flask.request.args.get(\"netid\")\n deleted_member = db.session.get(\n ClubMembersModel, (member_netid, clubid)\n )\n db.session.delete(deleted_member)\n db.session.commit()\n return flask.redirect(\"/group-remove-member?clubid=\" + clubid)\n\n\n@app.route(\"/group-moderator-upgrade\", methods=[\"GET\"])\ndef groupmoderatorupgrade():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n member = checkValidModerator(user, club)\n members = (\n db.session.query(ClubMembersModel.is_moderator, UsersModel)\n .filter(ClubMembersModel.clubid == clubid)\n .filter(UsersModel.netid == ClubMembersModel.netid)\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.first_name)\n .all()\n )\n html_code = flask.render_template(\n \"group-moderator-upgrade.html\",\n user=user,\n members=members,\n club=club,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/upgrademember\", methods=[\"POST\"])\ndef upgrademember():\n clubid = flask.request.args.get(\"clubid\")\n member_netid = flask.request.args.get(\"netid\")\n upgraded_member = db.session.get(\n ClubMembersModel, (member_netid, clubid)\n )\n upgraded_member.is_moderator = True\n db.session.add(upgraded_member)\n db.session.commit()\n return flask.redirect(\"/group-moderator-upgrade?clubid=\" + clubid)\n\n\n@app.route(\"/group-invite-request\", methods=[\"GET\"])\ndef groupinviterequest():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n member = checkValidModerator(user, club)\n html_code = flask.render_template(\n \"group-invite-request.html\", user=user, club=club\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/groupinvitepost\", methods=[\"POST\"])\ndef groupinvitepost():\n clubid = flask.request.args.get(\"clubid\")\n invited_netid = flask.request.form[\"netid\"]\n invited_user = db.session.get(UsersModel, invited_netid)\n invited_member = db.session.get(\n ClubMembersModel, (invited_netid, clubid)\n )\n join_exists = db.session.get(JoinRequests, (invited_netid, clubid))\n request_exists = db.session.get(\n InviteRequests, (invited_netid, clubid)\n )\n if (\n invited_user is None\n or invited_member is not None\n or join_exists is not None\n or request_exists is not None\n ):\n return flask.redirect(\n flask.url_for(\"invitenetiderror\", clubid=clubid), code=307\n )\n\n request = InviteRequests(invited_netid, clubid)\n 
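# A sketch of the duplicate-request guard used here: SQLAlchemy's session.get()
# accepts a composite primary key as a tuple (in the model's key-column order),
# so an existing row can short-circuit the insert. The key values are made up.
existing = db.session.get(InviteRequests, ("some_netid", 42))
if existing is None:
    db.session.add(InviteRequests("some_netid", 42))
    db.session.commit()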
db.session.add(request)\n db.session.commit()\n return flask.redirect(\"/group-members?clubid=\" + clubid)\n\n\n@app.route(\"/invite-netid-error\", methods=[\"POST\"])\ndef invitenetiderror():\n netid = auth.authenticate()\n user = db.session.get(UsersModel, netid)\n clubid = flask.request.args.get(\"clubid\")\n html_code = flask.render_template(\n \"invite-netid-error.html\", user=user, clubid=clubid\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/user-info\", methods=[\"GET\"])\ndef userinfo():\n user = checkValidUser()\n member_netid = flask.request.args.get(\"netid\")\n is_my_profile = member_netid == user.netid\n\n # if you're looking at your own profile, show all info\n if is_my_profile:\n html_code = flask.render_template(\n \"user-info.html\",\n requested_user=user,\n user=user,\n share_phone=True,\n share_socials=True,\n is_my_profile=is_my_profile,\n )\n response = flask.make_response(html_code)\n return response\n\n # find all shared clubs\n member_clubs = db.session.query(ClubMembersModel.clubid).filter(\n ClubMembersModel.netid == member_netid\n )\n\n user_clubs = db.session.query(ClubMembersModel.clubid).filter(\n ClubMembersModel.netid == user.netid\n )\n shared_clubs = set(member_clubs).intersection(set(user_clubs))\n share_phone = False\n share_socials = False\n for clubid in shared_clubs:\n club = db.session.get(ClubsModel, clubid)\n if club.share_phone:\n share_phone = True\n if club.share_socials:\n share_socials = True\n if share_phone and share_socials:\n break\n\n requested_user = db.session.get(UsersModel, member_netid)\n if requested_user.is_banned:\n return flask.redirect(\"/index\")\n html_code = flask.render_template(\n \"user-info.html\",\n requested_user=requested_user,\n user=user,\n share_phone=share_phone,\n share_socials=share_socials,\n is_my_profile=is_my_profile,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/my-invites\", methods=[\"GET\"])\ndef myinvites():\n user = checkValidUser()\n invites = (\n db.session.query(ClubsModel)\n .filter(InviteRequests.netid == user.netid)\n .filter(ClubsModel.clubid == InviteRequests.clubid)\n .all()\n )\n html_code = flask.render_template(\n \"my-invites.html\", user=user, invites=invites\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/invitefulfill\", methods=[\"POST\"])\ndef invitefulfill():\n netid = auth.authenticate()\n clubid = flask.request.args.get(\"clubid\")\n accept = flask.request.args.get(\"accept\")\n invite_request = db.session.get(InviteRequests, (netid, clubid))\n db.session.delete(invite_request)\n db.session.commit()\n if accept == \"1\":\n new_club_member = ClubMembersModel(clubid, netid, False)\n db.session.add(new_club_member)\n db.session.commit()\n return flask.redirect(\"/my-invites\")\n\n\n@app.route(\"/pending-invites\", methods=[\"GET\"])\ndef pendinginvites():\n user = checkValidUser()\n clubid = flask.request.args.get(\"clubid\")\n club = checkValidClub(clubid)\n invites = (\n db.session.query(UsersModel)\n .filter(UsersModel.netid == InviteRequests.netid)\n .filter(InviteRequests.clubid == clubid)\n .filter(UsersModel.is_banned == False)\n .all()\n )\n html_code = flask.render_template(\n \"pending-invites.html\",\n user=user,\n invites=invites,\n name=club.name,\n clubid=clubid,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/admin-console\", methods=[\"GET\"])\ndef adminconsole():\n user = checkValidAdmin()\n html_code = 
flask.render_template(\"admin-console.html\", user=user)\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/group-creation-requests\", methods=[\"GET\"])\ndef groupcreationrequests():\n user = checkValidAdmin()\n requests = (\n db.session.query(CreationRequests)\n .order_by(CreationRequests.name)\n .all()\n )\n html_code = flask.render_template(\n \"group-creation-requests.html\", user=user, requests=requests\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/groupfulfill\", methods=[\"POST\"])\ndef groupfulfill():\n reqid = flask.request.args.get(\"reqid\")\n creator_netid = flask.request.args.get(\"netid\")\n accept = flask.request.args.get(\"accept\")\n created_club = db.session.get(CreationRequests, reqid)\n if accept == \"0\":\n db.session.delete(created_club)\n db.session.commit()\n return flask.redirect(\"/group-creation-requests\")\n recent_club = (\n db.session.query(ClubsModel)\n .order_by(ClubsModel.clubid.desc())\n .first()\n )\n clubid = 0\n if recent_club is not None:\n clubid = recent_club.clubid + 1\n name = created_club.name\n share_phone = created_club.share_phone\n share_socials = created_club.share_socials\n public = created_club.public\n new_club = ClubsModel(\n clubid, name, public, share_phone, share_socials\n )\n new_club_member = ClubMembersModel(clubid, creator_netid, True)\n db.session.add(new_club)\n db.session.add(new_club_member)\n db.session.commit()\n db.session.delete(created_club)\n db.session.commit()\n # Redirect to index for loading the user's new page\n return flask.redirect(\"/group-creation-requests\")\n\n\n@app.route(\"/group-removal\", methods=[\"GET\"])\ndef groupremoval():\n user = checkValidAdmin()\n groups = (\n db.session.query(ClubsModel.clubid, ClubsModel.name)\n .order_by(ClubsModel.name)\n .all()\n )\n html_code = flask.render_template(\n \"group-removal.html\", user=user, groups=groups\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/removegroup\", methods=[\"POST\"])\ndef removegroup():\n clubid = flask.request.args.get(\"clubid\")\n while True:\n member = (\n db.session.query(ClubMembersModel)\n .filter(ClubMembersModel.clubid == clubid)\n .first()\n )\n joinreq = (\n db.session.query(JoinRequests)\n .filter(JoinRequests.clubid == clubid)\n .first()\n )\n invitereq = (\n db.session.query(InviteRequests)\n .filter(InviteRequests.clubid == clubid)\n .first()\n )\n if member is None and joinreq is None and invitereq is None:\n break\n if member is not None:\n db.session.delete(member)\n if joinreq is not None:\n db.session.delete(joinreq)\n if invitereq is not None:\n db.session.delete(invitereq)\n db.session.commit()\n club = db.session.get(ClubsModel, clubid)\n db.session.delete(club)\n db.session.commit()\n return flask.redirect(\"/group-removal\")\n\n\n@app.route(\"/ban-user\", methods=[\"GET\"])\ndef banuser():\n user = checkValidAdmin()\n html_code = flask.render_template(\"ban-user.html\", user=user)\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/banuserpost\", methods=[\"POST\"])\ndef banuserpost():\n banned_netid = flask.request.form[\"netid\"]\n user = db.session.get(UsersModel, banned_netid)\n if user is None:\n return flask.redirect(\n flask.url_for(\"adminnetiderror\"), code=307\n )\n if user.is_admin:\n return flask.redirect(\n flask.url_for(\"adminnetiderror\"), code=307\n )\n user.is_banned = True\n db.session.add(user)\n db.session.commit()\n return 
flask.redirect(\"/admin-console\")\n\n\n@app.route(\"/admin-upgrade\", methods=[\"GET\"])\ndef adminupgrade():\n user = checkValidAdmin()\n html_code = flask.render_template(\"admin-upgrade.html\", user=user)\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/adminupgradepost\", methods=[\"POST\"])\ndef adminupgradepost():\n upgraded_netid = flask.request.form[\"netid\"]\n user = db.session.get(UsersModel, upgraded_netid)\n if user is None:\n return flask.redirect(\n flask.url_for(\"adminnetiderror\"), code=307\n )\n user.is_admin = True\n db.session.add(user)\n db.session.commit()\n return flask.redirect(\"/admin-console\")\n\n\n@app.route(\"/admin-netid-error\", methods=[\"POST\"])\ndef adminnetiderror():\n netid = auth.authenticate()\n user = db.session.get(UsersModel, netid)\n html_code = flask.render_template(\n \"admin-netid-error.html\", user=user\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/banned\", methods=[\"GET\"])\ndef banned():\n netid = auth.authenticate()\n user = db.session.get(UsersModel, netid)\n if not user.is_banned:\n return flask.redirect(\"/index\")\n html_code = flask.render_template(\"banned.html\")\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/banned-users\", methods=[\"GET\"])\ndef bannedusers():\n user = checkValidAdmin()\n banned_users = (\n db.session.query(UsersModel)\n .filter(UsersModel.is_banned == True)\n .order_by(UsersModel.first_name)\n .all()\n )\n html_code = flask.render_template(\n \"banned-users.html\", user=user, banned_users=banned_users\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/unbanuserpost\", methods=[\"POST\"])\ndef unbanuserpost():\n unbanned_netid = flask.request.args.get(\"netid\")\n banned_user = db.session.get(UsersModel, unbanned_netid)\n banned_user.is_banned = False\n db.session.add(banned_user)\n db.session.commit()\n return flask.redirect(\"/banned-users\")\n\n\n@app.route(\"/users\", methods=[\"GET\"])\ndef users():\n user = checkValidUser()\n\n # get the string that user searched and current page number\n search_string = flask.request.args.get(\"search\")\n if not search_string:\n search_string = \"\"\n page_number = flask.request.args.get(\"page\")\n if not page_number:\n page_number = 0\n\n # get all users and their information\n users = (\n db.session.query(UsersModel)\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.netid)\n .all()\n )\n\n # if the search string is not empty, find users whose names or netids\n # contain the desired search string\n if search_string:\n lowercase = search_string.lower()\n print(lowercase)\n users = (\n db.session.query(UsersModel)\n .filter(\n (UsersModel.netid.ilike(\"%\" + lowercase + \"%\"))\n | (UsersModel.display_name.ilike(\"%\" + lowercase + \"%\"))\n | (\n (\n UsersModel.first_name\n + \" \"\n + UsersModel.last_name\n ).ilike(\"%\" + lowercase + \"%\")\n )\n )\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.netid)\n .all()\n )\n\n # split users up into pages of 50. 
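# A sketch of more_itertools.chunked, which drives the paging here: it yields
# successive fixed-size lists, with a shorter final chunk.
import more_itertools as mit

print(list(mit.chunked(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]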
Each page is split into lists of 10.\n # Later on, allow user to select number of results per page.\n chunked = list(mit.chunked(users, 50))\n users_pages = []\n for page in chunked:\n users_pages.append(list(mit.chunked(page, 10)))\n\n html_code = flask.render_template(\n \"users.html\",\n user=user,\n users_pages=users_pages,\n max_pages=len(users_pages),\n page_number=int(page_number),\n search_string=search_string,\n )\n response = flask.make_response(html_code)\n return response\n\n\n@app.route(\"/refresh-database\", methods=[\"POST\"])\ndef refreshdatabase():\n # clear database\n db.session.query(UndergraduatesModel).delete()\n\n req = req_lib.ReqLib()\n\n class2023 = req.getJSON(\n req.configs.GROUPS, name=\"Undergraduate Class of 2023\"\n )\n class2024 = req.getJSON(\n req.configs.GROUPS, name=\"Undergraduate Class of 2024\"\n )\n class2025 = req.getJSON(\n req.configs.GROUPS, name=\"Undergraduate Class of 2025\"\n )\n\n # class of 2026 has no members right now\n # class2026 = req.getJSON(req.configs.GROUPS, name=\"Undergraduate Class of 2026\")\n\n def get_uid(member):\n return member.split(\",\")[0][3:]\n\n for member in class2023[0][\"member\"]:\n uid = get_uid(member)\n undergraduate = UndergraduatesModel(uid, 2023)\n db.session.add(undergraduate)\n\n for member in class2024[0][\"member\"]:\n uid = get_uid(member)\n undergraduate = UndergraduatesModel(uid, 2024)\n db.session.add(undergraduate)\n\n for member in class2025[0][\"member\"]:\n uid = get_uid(member)\n undergraduate = UndergraduatesModel(uid, 2025)\n db.session.add(undergraduate)\n\n # for member in class2026[0]['member']:\n # uid = get_uid(member)\n # undergraduate = UndergraduatesModel(uid, 2026)\n # db.session.add(undergraduate)\n\n db.session.commit()\n return flask.redirect(\"/index\")\n\n\n## Profile Update Route\n@app.route(\"/my-contacts\", methods=[\"GET\"])\ndef mycontacts():\n user = checkValidUser()\n\n clubids = db.session.query(ClubMembersModel.clubid).filter(\n ClubMembersModel.netid == user.netid\n )\n\n contacts = (\n db.session.query(UsersModel)\n .filter(ClubMembersModel.netid != user.netid)\n .filter(UsersModel.netid == ClubMembersModel.netid)\n .filter(ClubMembersModel.clubid.in_(clubids))\n .filter(UsersModel.is_banned == False)\n .order_by(UsersModel.first_name)\n .all()\n )\n\n html_code = flask.render_template(\n \"my-contacts.html\", user=user, contacts=contacts\n )\n response = flask.make_response(html_code)\n return response\n","repo_name":"cos333-look-up/COS333_look.up","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":34887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33411792866","text":"\ndef print_solution(x):\n \"\"\" prints solution \"\"\"\n print(f\"The solution is: {x}\")\n\nclass Node():\n def __init__(self):\n self.name = None\n self.parent = None\n self.value = 0\n self.children = []\n\n def __getitem__(self, i):\n return self.children[i]\n\n\ndef main():\n \"\"\" calculates solution \"\"\"\n file = open('input.txt', 'r', encoding='utf-8')\n\n node_hash = {}\n\n # creates nodes without relationships\n for line in file.readlines():\n node_name, node_score = line.strip().split()[:2]\n node_score = int(node_score.strip('()'))\n node_hash[node_name] = Node()\n node_hash[node_name].name = node_name\n node_hash[node_name].score = node_score\n\n file.seek(0)\n # create parent/child relationships\n for line in file.readlines():\n values = line.strip().split()\n if len(values) > 3:\n parent = 
values[0]\n for child in values[3:]:\n child = child.strip(',')\n node_hash[parent].children.append(node_hash[child])\n node_hash[child].parent = node_hash[parent]\n\n # look for the root: the node with no parent\n for v in node_hash.values():\n if v.parent is None:\n print_solution(v.name)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dneff/adventofcode","sub_path":"python/2017/07/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19857210956","text":"# -*- coding: utf-8 -\n#\n# This file is part of gaffer. See the NOTICE for more information.\nimport sys\n\nfrom .base import Command\nfrom ...httpclient import GafferNotFound\n\nclass Start(Command):\n\n \"\"\"\n usage: gaffer ps:start [<job>]... [--app=appname] [--no-input]\n\n <job> job label\n\n --app=appname name of the procfile application.\n --no-input don't prompt a confirmation\n \"\"\"\n\n name = \"ps:start\"\n short_descr = \"start a job\"\n\n def run(self, config, args):\n appname = self.default_appname(config, args)\n server, procfile = config.get(\"server\", \"procfile\")\n\n if not args['<job>']:\n if (not args[\"--no-input\"] and\n self.confirm(\"Do you want to start all jobs in %r\" %\n appname)):\n\n apps = server.sessions()\n if appname not in apps:\n raise RuntimeError(\"%r not found\\n\" % appname)\n\n # start all the jobs of the complete app\n server.jobs_walk(lambda s, job: self._start(s, job))\n print(\"==> all jobs in %r started\" % appname)\n else:\n for name in args['<job>']:\n # confirm that we can start this job or pid\n if (not args[\"--no-input\"] and\n not self.confirm(\"Do you want to start %r\" % name)):\n continue\n\n # we want to start a job\n appname, job_name = self.parse_name(name, appname)\n if (self.use_procfile(config, appname) and\n job_name not in procfile.cfg):\n print(\"Ignore %r\" % name)\n continue\n\n pname = \"%s.%s\" % (appname, job_name)\n try:\n job = server.get_job(pname)\n except GafferNotFound:\n sys.stderr.write(\"%r not found\\n\" % name)\n sys.stderr.flush()\n continue\n job.start()\n print(\"%r started\" % name)\n\n def _start(self, server, job):\n try:\n job.start()\n print(\"job %r started\" % job.name)\n except GafferNotFound:\n sys.stderr.write(\"%r not found\\n\" % job.name)\n sys.stderr.flush()\n","repo_name":"benoitc/gaffer","sub_path":"gaffer/cli/commands/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"81"} +{"seq_id":"22249045974","text":"import picamera\nimport picamera.array\nimport numpy as np\nimport time\n\nimport datetime\n\nimport logging\nimport shutil\nimport os\n\nfrom PIL import Image\n\n\n# -----------------------------------------------------------------------------------------------\n# General settings\nfolder_path = '/home/pi/videos'\ntime_total = 4 * 60 * 60 # 4 hours\ntime_motion_record = 10 # 10 seconds\ntime_file_length = 10 * 60 # 10 minutes\ncamera_cols = 1920\ncamera_rows = 1080\nframerate = 30\n# i2c_bus = 10\n# default_focus = 300\ncamera_timestamp = False\n# -----------------------------------------------------------------------------------------------\n# Motion sensitivity\nmotion_vectors_norm = 80 # mvecs norm\nmotion_density = 80 # number of macroblocks with |mvecs| > motion_vectors_norm\nmotion_min_log_time = 1 # seconds\n# 
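# A standalone sketch of the motion test used by DetectMotion below: the
# per-macroblock (x, y) motion vectors are reduced to magnitudes, and motion is
# declared when enough blocks exceed the norm threshold. The arrays are fabricated.
import numpy as np

mv = {"x": np.array([[10.0, 0.0]]), "y": np.array([[99.0, 0.0]])}
mag = np.sqrt(np.square(mv["x"]) + np.square(mv["y"])).clip(0, 255).astype(np.uint8)
motion = (mag > 80).sum() > 0  # True: one block has magnitude ~99.5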
-----------------------------------------------------------------------------------------------\n\n# Set up logging\nif not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\nlogging.basicConfig(filename=os.path.join(folder_path,'motion.log'), level=logging.INFO,\n format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n\nclass DetectMotion(picamera.array.PiMotionAnalysis):\n def __init__(self, camera):\n super(DetectMotion, self).__init__(camera)\n self.motion_detected = False\n self.last_detection = time.time()\n self.last_logged = time.time() # Add this line\n\n def analyse(self, a):\n a = np.sqrt(\n np.square(a['x'].astype(float)) +\n np.square(a['y'].astype(float))\n ).clip(0, 255).astype(np.uint8)\n\n if (a > motion_vectors_norm).sum() > motion_density:\n self.motion_detected = True\n self.last_detection = time.time()\n \n # Only log if at least 1 second has passed since the last log\n if self.last_detection - self.last_logged >= motion_min_log_time:\n logging.info('Motion detected') # Log the detection\n print('Motion detected')\n self.last_logged = self.last_detection # Update the last logged time\n\n# -----------------------------------------------------------------------------------------------\n\nif not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\nprint('starting: ')\ntarget_datetime = datetime.datetime(year=2023, month=10, day=7, hour=10, minute=30, second=30)\nprint(target_datetime)\nprint(datetime.datetime.now())\nwhile datetime.datetime.now() < target_datetime:\n time.sleep(1)\nprint('done')\n\nprint(\"Initializing Camera...\")\ncamera = picamera.PiCamera()\n# camera.resolution = (camera_cols, camera_rows)\n# camera.framerate = framerate\n\nprint(\"Camera Initialized\")\n\ntotal_disk, used_disk, free_disk = shutil.disk_usage('/home/pi/videos') # returns total, use, and free\nprint(total_disk)\nprint(used_disk)\nprint(free_disk)\n\nprint(\"Start recording...\")\noutput = DetectMotion(camera)\ncamera.start_recording('/dev/null', format='h264', motion_output=output)\n\nstart_time = time.time()\nsummed_time = 0\n\n# run the program until time_total\nwhile summed_time < time_total: #time.time() - start_time < time_total:\n camera.wait_recording(0.1)\n if output.motion_detected:\n\n start_recording_time = time.time()\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n filename = os.path.join(folder_path, timestamp)\n # print(f\"Motion detected - total time: {int(time.time() - start_time)} - current time: {filename} {int(time.time() - output.last_detection)}\")\n\n camera.split_recording(filename)\n output.motion_detected = False\n while (time.time() - output.last_detection) < time_motion_record and (time.time() - start_recording_time) < time_file_length:\n if camera_timestamp:\n camera.annotate_text = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')\n camera.wait_recording(.1)\n camera.wait_recording(.1)\n \n # check duration\n dt = int(time.time() - start_recording_time)\n summed_time += dt\n # finish previous recording\n camera.split_recording('/dev/null')\n # rename file with duration\n os.rename(filename, filename + f\"_{dt:08d}.h264\")\n # print(f\"Recording File Time = {dt:08d}\")\n\n output.motion_detected = False\n\n total_disk, used_disk, free_disk = shutil.disk_usage('/home/pi/videos') # returns total, use, and free\n if free_disk < 5000000000:\n break\n\nprint(\"Stop 
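# A sketch of the free-space guard used by the capture loop above:
# shutil.disk_usage returns (total, used, free) in bytes, so recording can stop
# before the disk fills.
import shutil

total, used, free = shutil.disk_usage("/home/pi/videos")
if free < 5_000_000_000:  # keep roughly 5 GB of headroom
    print("low disk space, stopping capture")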
Recording...\")\ncamera.stop_recording()\n\n\n\n\n\n\n\n","repo_name":"guadabernal/RPiBeeDetection","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4776506150","text":"import rclpy\nfrom rclpy.node import Node\n\nfrom ros2_custom_msgs.msg import T265\n\nimport pyrealsense2 as rs\n\nclass T265DataPublisher(Node):\n\n def __init__(self):\n super().__init__('t265_data_publisher')\n self.publisher_ = self.create_publisher(T265, 't265', 10)\n timer_period = 0.5 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n self.pipe = rs.pipeline()\n self.cfg = rs.config()\n self.cfg.enable_stream(rs.stream.pose)\n self.pipe.start(self.cfg)\n\n def timer_callback(self):\n frames = self.pipe.wait_for_frames()\n pose = frames.get_pose_frame()\n if pose:\n data = pose.get_pose_data() \n msg = T265()\n msg.position.position.x = data.translation.x\n msg.position.position.y = data.translation.y\n msg.position.position.z = data.translation.z\n msg.position.orientation.x = data.rotation.x\n msg.position.orientation.y = data.rotation.y\n msg.position.orientation.z = data.rotation.z\n msg.position.orientation.w = data.rotation.w\n\n msg.velocity.linear.x = data.velocity.x\n msg.velocity.linear.y = data.velocity.y\n msg.velocity.linear.z = data.velocity.z\n msg.velocity.angular.x = data.angular_velocity.x\n msg.velocity.angular.y = data.angular_velocity.y\n msg.velocity.angular.z = data.angular_velocity.z\n\n msg.acceleration.linear.x = data.acceleration.x\n msg.acceleration.linear.y = data.acceleration.y\n msg.acceleration.linear.z = data.acceleration.z\n msg.acceleration.angular.x = data.angular_acceleration.x\n msg.acceleration.angular.y = data.angular_acceleration.y\n msg.acceleration.angular.z = data.angular_acceleration.z\n\n msg.header.frame_id = \"t265_camera\"\n msg.header.stamp = self.get_clock().now().to_msg()\n\n self.publisher_.publish(msg)\n self.get_logger().info('Publishing: \"%s\"' % msg)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n t265_publisher =T265DataPublisher()\n\n rclpy.spin(t265_publisher)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n t265_publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()","repo_name":"neduchal/t265_ros2_wrapper","sub_path":"t265_ros2_wrapper/t265_ros2_wrapper_node.py","file_name":"t265_ros2_wrapper_node.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37111933506","text":"\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport os\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",\"webparser.settings\")\n\nimport django\ndjango.setup()\n\nfrom data.models import WebData\n\ndef parse_web():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n url = 'https://www.koreaherald.com//'\n req = requests.get(url)\n# url 에 접속해라. 
Make the request.\n\n    html = req.text\n    soup = BeautifulSoup(html, 'html.parser')\n    titles = soup.select(\n        'dd > a'\n    )\n\n\n    data = {}\n# print(titles)\n\n    for title in titles:\n        data[title.text] = url + title.get('href')\n        # print(title.text)\n        # print(title.get('href'))\n    return data\n\nif __name__ == '__main__':\n    web_data_dict = parse_web()\n    for t, l in web_data_dict.items():\n        WebData(title=t, link=l).save()","repo_name":"Woobin32/webparser2","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"43337112677","text":"from sweetest.log import logger\nfrom sweetest.config import all_keywords, comma_lower, comma_upper, equals, vertical\nfrom sweetest.elements import e\nfrom sweetest.globals import g\n\n\ndef escape(data):\n    # First, replace the escaped characters with placeholders\n    # return data.replace('\\\\,', comma_lower).replace('\\\\,', comma_upper).replace('\\\\=', equals)\n    return data.replace('\\\\,', comma_lower)\n\n\ndef recover(data):\n    # Then restore the escaped characters\n    # return data.replace(comma_lower, ',').replace(comma_upper, ',').replace(equals, '=')\n    return data.replace(comma_lower, ',')\n\n\ndef check_keyword(kw):\n    try:\n        keyword = all_keywords.get(kw)\n        return keyword\n    except:\n        logger.exception('Keyword: %s does not exist' % kw)\n        exit()\n\n\ndef data_format(data):\n    data = escape(data)\n    if ',,' in data:\n        data_list = data.split(',,')\n    else:\n        # data = data.replace('，', ',')  # full-width commas are no longer treated as separators\n        data_list = []\n        if data:\n            data_list = data.split(',')\n    data_dict = {}\n    for data in data_list:\n        # split on the first '=' only\n        d = data.split('=', 1)\n        d[-1] = recover(d[-1])  # only the value needs unescaping; <element attribute> or <variable name> should never contain escape characters\n        if len(d) == 1:\n            # no '=' separator means the field holds only content, assigned to 'text' by default\n            if not data_dict.get('text'):\n                data_dict['text'] = d[0]\n        elif len(d) == 2:\n            d[0] = d[0].strip()  # strip the whitespace around <element attribute>, if any\n            data_dict[d[0]] = d[1]\n        else:\n            raise Exception(\n                'Error: Testcase\'s Data is invalid, too many \"=\" or too few \",\"')\n    return data_dict\n\n\ndef parse(testsuit):\n    '''\n    Parse test cases into executable parameters, e.g.\n    \"open the home page\" parses to: OPEN 127.0.0.1\n    '''\n    for testcase in testsuit:\n        for step in testcase['steps']:\n            step['keyword'] = check_keyword(step['keyword'])\n            # step['page'], step['custom'], step['element'] = elements_format(\n            #     step['page'], step['element'])\n            step['data'] = data_format(str(step['data']))\n            step['expected'] = data_format(str(step['expected']))\n            step['output'] = data_format(step['output'])\n","repo_name":"Gingo222/JIN","sub_path":"SK_DailyJob/sweetest/sweetest/sweetest/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"10649596601","text":"import tensorflow as tf\nfrom tensorflow import math\n\n\ndef type_to_one_hot(episode_type):\n    if episode_type == '+':\n        label_index = 0\n    else:\n        label_index = 1\n    return tf.cast(tf.one_hot(label_index, 2), tf.float32)\n\n\ndef type_to_label(episode_type):\n    if episode_type == '+':\n        return 0\n    else:\n        return 1\n\n\ndef one_hot(target_action, action_prob, reward):\n    action_dim = action_prob.shape[0]\n    action_onehot = tf.one_hot(target_action, action_dim)\n    action_mask = tf.cast(action_onehot, tf.bool)\n    picked_prob = tf.boolean_mask(action_prob, action_mask)\n    action_loss = tf.reduce_sum(-math.log(picked_prob) * reward)\n    return 
action_loss\n","repo_name":"presisco/lazyDIVA","sub_path":"loss_tools.py","file_name":"loss_tools.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69999662986","text":"\"\"\"\nTensorflow keras implementation of U-Net\nhttps://github.com/Bashirkazimi/BashirLearning\nAuthor: Bashir Kazimi\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers, Model, Sequential\n\n\ndef fcn(input_shape=(128, 128, 3), num_classes=21):\n \"\"\"\n Fully Convolutional Networks for semantic segmentation implemented in\n tf.keras based on https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcn.pdf\n Args:\n input_shape (tuple): input shape\n num_classes (int): number of categories\n\n Returns: tf.keras.Model of fcn\n\n \"\"\"\n\n base_model = tf.keras.applications.vgg16.VGG16(input_shape=input_shape,\n weights=None,\n include_top=False)\n layer_names = [\n 'block3_pool',\n 'block4_pool',\n 'block5_pool'\n ]\n\n feature_layers = [base_model.get_layer(name).output for name in layer_names]\n\n feature_extractor = Model(inputs=base_model.input, outputs=feature_layers)\n\n inputs = layers.Input(shape=input_shape)\n\n pool3, pool4, pool5 = feature_extractor(inputs)\n\n x = layers.Conv2D(\n 4096,\n 1,\n 1,\n padding='same'\n )(pool5)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n x = layers.Conv2D(\n 4096,\n 1,\n 1,\n padding='same'\n )(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n x = layers.Conv2D(\n num_classes,\n 1,\n 1,\n padding='same'\n )(x)\n\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2DTranspose(\n num_classes,\n 4,\n 2,\n padding='same'\n )(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n # pool4\n pool4 = layers.Conv2D(\n num_classes,\n 1,\n 1\n )(pool4)\n pool4 = layers.BatchNormalization()(pool4)\n pool4 = layers.Activation('relu')(pool4)\n\n x = pool4+x\n\n x = layers.Conv2DTranspose(\n num_classes,\n 4,\n 2,\n padding='same'\n )(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n\n # pool3\n pool3 = layers.Conv2D(\n num_classes,\n 1,\n 1\n )(pool3)\n pool3 = layers.BatchNormalization()(pool3)\n pool3 = layers.Activation('relu')(pool3)\n\n x = x + pool3\n\n x = layers.Conv2DTranspose(\n num_classes,\n 16,\n 8,\n padding='same'\n )(x)\n\n x = layers.Activation('softmax')(x)\n\n model = Model(inputs=inputs, outputs=x)\n model.summary()\n\n return model\n","repo_name":"Bashirkazimi/BashirLearning","sub_path":"src/fcn.py","file_name":"fcn.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30635624094","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Date : 2019-07-01 14:40:16\n# @Author : Xuenan(Roderick) Wang\n# @Email : roderick_wang@outlook.com\n# @Github : https://github.com/hello-roderickwang\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nxsdk.api.n2a as nx\nfrom nxsdk.utils.plotutils import plotRaster\n\nclass XOR:\n def __init__(self):\n self.weight_1 = np.ones((4, 2), dtype=int)\n self.weight_2 = np.ones((1, 4), dtype=int)\n self.runTime = 80\n self.num_input = 2\n self.num_output = 1\n self.num_hidden = 4\n\n # Define network structure\n def set_up_network(self, turn_on_learning=True):\n net = nx.NxNet()\n\n # Define compartment prototype(all neurons are the same)\n if turn_on_learning is 
True:\n comProto = nx.CompartmentPrototype(vThMant=10,\n compartmentCurrentDecay=4096,\n compartmentVoltageDecay=0,\n enableSpikeBackprop=1,\n enableSpikeBackpropFromSelf=1)\n elif turn_on_learning is False:\n comProto = nx.CompartmentPrototype(vThMant=10,\n compartmentCurrentDecay=4096,\n compartmentVoltageDecay=0)\n else:\n print('ERROR! turn_on_learning can only be True or False.')\n\n # Create compartment group for different layers\n comGrp = {}\n comGrp['inputGrp'] = net.createCompartmentGroup(size=self.num_input, prototype=comProto)\n comGrp['hiddenGrp'] = net.createCompartmentGroup(size=self.num_hidden, prototype=comProto)\n comGrp['outputGrp'] = net.createCompartmentGroup(size=self.num_output, prototype=comProto)\n\n # Create spike generator as teaching neurons\n comGrp['inputGen'] = net.createSpikeGenProcess(numPorts=self.num_input)\n comGrp['outputGen'] = net.createSpikeGenProcess(numPorts=self.num_output)\n\n # Define learning rule\n lr = net.createLearningRule(dw='2*x1*y0',\n x1Impulse=40,\n x1TimeConstant=4,\n y1Impulse=40,\n y1TimeConstant=4,\n tEpoch=2)\n\n # Define connection prototype\n if turn_on_learning is True:\n connProto = nx.ConnectionPrototype(enableLearning=1, learningRule=lr)\n connTeachingProto = nx.ConnectionPrototype(enableLearning=0)\n elif turn_on_learning is False:\n connProto = nx.ConnectionPrototype(enableLearning=0)\n else:\n print('ERROR! turn_on_learning can only be True or False.')\n\n # Create connections\n conn = {}\n if turn_on_learning is True:\n conn['inputGen_inputGrp'] = comGrp['inputGen'].connect(comGrp['inputGrp'], prototype=connTeachingProto, weight=np.array([[255, 0], [0, 255]]))\n conn['inputGrp_hiddenGrp'] = comGrp['inputGrp'].connect(comGrp['hiddenGrp'], prototype=connProto, weight=np.ones((4, 2), dtype=int)*50)\n conn['hiddenGrp_outputGrp'] = comGrp['hiddenGrp'].connect(comGrp['outputGrp'], prototype=connProto, weight=np.ones((1, 4), dtype=int)*50)\n conn['outputGen_outputGrp'] = comGrp['outputGen'].connect(comGrp['outputGrp'], prototype=connTeachingProto, weight=np.array([255]))\n elif turn_on_learning is False:\n conn['inputGen_inputGrp'] = comGrp['inputGen'].connect(comGrp['inputGrp'], prototype=connProto, weight=np.array([[255, 0], [0, 255]]))\n conn['inputGrp_hiddenGrp'] = comGrp['inputGrp'].connect(comGrp['hiddenGrp'], prototype=connProto, weight=self.weight_1)\n conn['hiddenGrp_outputGrp'] = comGrp['hiddenGrp'].connect(comGrp['outputGrp'], prototype=connProto, weight=self.weight_2)\n else:\n print('ERROR! 
turn_on_learning can only be True or False.')\n\n return net, comGrp, conn\n\n # Define probes\n def set_up_probe(self, comGrp, conn):\n probe = {}\n probe['inputGrpS'] = comGrp['inputGrp'].probe(nx.ProbeParameter.SPIKE)[0]\n probe['inputGrpU'] = comGrp['inputGrp'].probe(nx.ProbeParameter.COMPARTMENT_CURRENT)[0]\n probe['inputGrpV'] = comGrp['inputGrp'].probe(nx.ProbeParameter.COMPARTMENT_VOLTAGE)[0]\n probe['outputGrpS'] = comGrp['outputGrp'].probe(nx.ProbeParameter.SPIKE)[0]\n probe['outputGrpU'] = comGrp['outputGrp'].probe(nx.ProbeParameter.COMPARTMENT_CURRENT)[0]\n probe['outputGrpV'] = comGrp['outputGrp'].probe(nx.ProbeParameter.COMPARTMENT_VOLTAGE)[0]\n probe['hiddenGrpS'] = comGrp['hiddenGrp'].probe(nx.ProbeParameter.SPIKE)[0]\n probe['hiddenGrpU'] = comGrp['hiddenGrp'].probe(nx.ProbeParameter.COMPARTMENT_CURRENT)[0]\n probe['hiddenGrpV'] = comGrp['hiddenGrp'].probe(nx.ProbeParameter.COMPARTMENT_VOLTAGE)[0]\n probe['weight_1'] = conn['inputGrp_hiddenGrp'].probe(nx.ProbeParameter.SYNAPSE_WEIGHT)\n probe['weight_2'] = conn['hiddenGrp_outputGrp'].probe(nx.ProbeParameter.SYNAPSE_WEIGHT)[0][0]\n print('len(probe[weight_1]):', len(probe['weight_1']))\n print('len(probe[weight_1][0]):', len(probe['weight_1'][0]))\n return probe\n\n def save_weight(self, conn):\n self.weight_1 = conn['inputGrp_hiddenGrp'].getConnectionState('weight')\n self.weight_2 = conn['hiddenGrp_outputGrp'].getConnectionState('weight')\n\n def run(self):\n net, comGrp, conn = self.set_up_network(turn_on_learning=True)\n comGrp['inputGen'].addSpikes([0, 1], [[15, 20, 35, 40, 55, 60, 75, 80],\n [10, 20, 30, 40, 50, 60, 70, 80]])\n comGrp['outputGen'].addSpikes(0, [10, 15, 30, 35, 50, 55, 70, 75])\n probeLrn = self.set_up_probe(comGrp, conn)\n net.run(85)\n net.disconnect()\n self.save_weight(conn)\n\n print('weight_1:\\n', self.weight_1)\n print('weight_2:\\n', self.weight_2)\n\n plt.figure(1, figsize=(18, 20))\n \n fig1 = plt.subplot(9, 1, 1)\n probeLrn['inputGrpU'].plot()\n plt.title('Input compartment current')\n fig1.set_xlim(0, 100)\n \n fig2 = plt.subplot(9, 1, 2)\n probeLrn['inputGrpV'].plot()\n plt.title('Input compartment voltage')\n fig2.set_xlim(fig1.get_xlim())\n \n fig3 = plt.subplot(9, 1, 3)\n probeLrn['inputGrpS'].plot()\n plt.title('Input spikes')\n fig3.set_xlim(fig1.get_xlim())\n\n fig4 = plt.subplot(9, 1, 4)\n probeLrn['hiddenGrpU'].plot()\n plt.title('Hidden compartment current')\n fig4.set_xlim(fig1.get_xlim())\n \n fig5 = plt.subplot(9, 1, 5)\n probeLrn['hiddenGrpV'].plot()\n plt.title('Hidden compartment voltage')\n fig5.set_xlim(fig1.get_xlim())\n \n fig6 = plt.subplot(9, 1, 6)\n probeLrn['hiddenGrpS'].plot()\n plt.title('Hidden spikes')\n fig6.set_xlim(fig1.get_xlim())\n \n fig7 = plt.subplot(9, 1, 7)\n probeLrn['outputGrpU'].plot()\n plt.title('Output compartment current')\n fig7.set_xlim(fig1.get_xlim())\n \n fig8 = plt.subplot(9, 1, 8)\n probeLrn['outputGrpV'].plot()\n plt.title('Output compartment voltage')\n fig8.set_xlim(fig1.get_xlim())\n \n fig9 = plt.subplot(9, 1, 9)\n probeLrn['outputGrpS'].plot()\n plt.title('Output spikes')\n fig9.set_xlim(fig1.get_xlim())\n\n plt.figure(2, figsize=(18, 20))\n\n Fig_w1 = plt.subplot(2, 1, 1)\n probeLrn['weight_1'][0][0].plot()\n plt.title('Weight 1')\n\n Fig_w2 = plt.subplot(2, 1, 2)\n probeLrn['weight_2'].plot()\n plt.title('Weight 2')\n Fig_w2.set_xlim(Fig_w1.get_xlim())\n\n plt.tight_layout()\n plt.show()\n\n net, comGrp, conn = self.set_up_network(turn_on_learning=False)\n comGrp['inputGen'].addSpikes([0, 1], [[15, 20],\n [10, 20]])\n probeNonLrn 
= self.set_up_probe(comGrp, conn)\n net.run(25)\n net.disconnect()\n\n plt.figure(3, figsize=(18, 20))\n \n fig1 = plt.subplot(9, 1, 1)\n probeNonLrn['inputGrpU'].plot()\n plt.title('Input compartment current')\n fig1.set_xlim(0, 100)\n \n fig2 = plt.subplot(9, 1, 2)\n probeNonLrn['inputGrpV'].plot()\n plt.title('Input compartment voltage')\n fig2.set_xlim(fig1.get_xlim())\n \n fig3 = plt.subplot(9, 1, 3)\n probeNonLrn['inputGrpS'].plot()\n plt.title('Input spikes')\n fig3.set_xlim(fig1.get_xlim())\n\n fig4 = plt.subplot(9, 1, 4)\n probeNonLrn['hiddenGrpU'].plot()\n plt.title('Hidden compartment current')\n fig4.set_xlim(fig1.get_xlim())\n \n fig5 = plt.subplot(9, 1, 5)\n probeNonLrn['hiddenGrpV'].plot()\n plt.title('Hidden compartment voltage')\n fig5.set_xlim(fig1.get_xlim())\n \n fig6 = plt.subplot(9, 1, 6)\n probeNonLrn['hiddenGrpS'].plot()\n plt.title('Hidden spikes')\n fig6.set_xlim(fig1.get_xlim())\n \n fig7 = plt.subplot(9, 1, 7)\n probeNonLrn['outputGrpU'].plot()\n plt.title('Output compartment current')\n fig7.set_xlim(fig1.get_xlim())\n \n fig8 = plt.subplot(9, 1, 8)\n probeNonLrn['outputGrpV'].plot()\n plt.title('Output compartment voltage')\n fig8.set_xlim(fig1.get_xlim())\n \n fig9 = plt.subplot(9, 1, 9)\n probeNonLrn['outputGrpS'].plot()\n plt.title('Output spikes')\n fig9.set_xlim(fig1.get_xlim())\n\n plt.tight_layout()\n plt.show()\n\nif __name__ == '__main__':\n snn_xor = XOR()\n snn_xor.run()","repo_name":"hello-roderickwang/Playground","sub_path":"Loihi/XOR.py","file_name":"XOR.py","file_ext":"py","file_size_in_byte":9877,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71433378826","text":"from setuptools import setup\nimport sys\nsys.setrecursionlimit(20000)\nAPP = ['maks_app.py']\nDATA_FILES = ['violator.wav', 'non_violator.wav', 'facemask-model']\nOPTIONS = {\n\t'argv_emulation': True, \n\t'site_packages': True,\n\t'iconfile': 'maks-icon.icns',\n\t'packages': ['cv2', 'keras', 'numpy', 'PIL', 'tensorflow', 'mongo_upload'],\n\t'plist': {\n\t\t'CFBundleName': 'Maks',\n\t}\n}\nsetup(\n\tapp=APP,\n\tdata_files=DATA_FILES,\n\toptions={'py2app': OPTIONS},\n\tsetup_requires=['py2app'],\n)","repo_name":"adityashukzy/Maks","sub_path":"py2app-setups/setup_mainapp.py","file_name":"setup_mainapp.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70206750026","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport numpy as np\nimport text_processor as tp\nfrom models import *\nfrom training_handler import TrainingHandler\nfrom models import *\nfrom data_gen import *\nfrom keras.models import load_model\n\ndg = DataGen()\ntest_n_batches, test_batch_size = 30, 100 \ntest_gen = dg.gen(batch_size=test_batch_size, n_batches=test_n_batches, trainset=False)\n\nn_features = len(char2int)\nn_steps_in = dg.max_root_len\nn_steps_out = dg.max_output_len\n\ncorpus = \"dataset_train.txt\"\ntag_name = \"double_GRU_128\"\nmodel_name = \"morpholizer\"\n\nmodel = load_model(\"model_weights/morpholizer_enc_dec_256_512/model_weight-02-0.0095.hdf5\")\n# model.summary()\nfeat_units = 15\nn_dec_units = 512\nencoder_inputs = model.input[0]\n\nencoder_outputs, state_h, state_c = model.get_layer('encoder_lstm').output\nfeature_input = model.input[2]\nfeat_out = model.get_layer('feature_output').output\nstate_h = model.get_layer('dense_1').output\nstate_c = model.get_layer('dense_2').output\nencoder_states = [state_h, 
state_c]\nencoder_model = Model([encoder_inputs, feature_input], encoder_states)\n\n\ndecoder_inputs = model.input[1]\n# define inference decoder\ndecoder_state_input_h = Input(shape=(n_dec_units,))\ndecoder_state_input_c = Input(shape=(n_dec_units,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n\ndecoder_lstm = model.get_layer('decoder_lstm')\ndecoder_outputs, state_h_dec, state_c_dec = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)\ndecoder_states = [state_h_dec, state_c_dec]\n\ndecoder_dense = model.get_layer('train_output')\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)\n\n\ntotal, correct = 0, 0\nsims = []\nfor b in range(test_n_batches):\n    [X1, X2, X3], y = next(test_gen)\n    for j in range(test_batch_size):\n        X33 = X3[j].reshape((1, X3.shape[1])) \n        X11 = X1[j].reshape((1, X1.shape[1], X1.shape[2]))\n        target = predict(encoder_model, decoder_model, X11, X33, n_steps_out, n_features)\n        root = ''.join(dg.one_hot_decode(X1[j])).replace('&', ' ')\n        word = ''.join(dg.one_hot_decode(y[j])).replace('&', ' ')\n        targetS = ''.join(dg.one_hot_decode(target)).replace('&', ' ')\n        sims.append(dg.word_sim(word, targetS))\n        if dg.one_hot_decode(y[j]) == dg.one_hot_decode(target):\n            correct += 1\n        print(b, root, word, targetS)\n    total += test_batch_size\n    \nword_sim_average = sum(sims)/len(sims)\nprint('Word Similarity Average: {0:.2f}%'.format(word_sim_average))\nprint('Accuracy: %.2f%%' % (float(correct)/float(total)*100.0))\n","repo_name":"leobitz/wolayita_morph","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"40606883647","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nIn an undirected graph, the degree d(u) of a vertex u is the number of neighbors\nu has, or equivalently, the number of edges incident upon it.\n\nGiven: A simple graph with n≤10³ vertices in the edge list format.\n\nReturn: An array D[1..n] where D[i] is the degree of vertex i.\n\"\"\"\n\ndef create_undirected_graph(relations, cardinality):\n    graph = {vertex:[] for vertex in range(1,cardinality+1)}\n    for relation in relations:\n        v1, v2 = relation\n        graph[v1].append(v2)\n        graph[v2].append(v1)\n    return graph\n\nif __name__ == '__main__':\n    with open('rosalind_deg.txt') as file:\n        number_of_vertices = [int(value) for value in file.readline().split()][0]\n        relations = [tuple([int(number) for number in value.split()]) for value in file.readlines()]\n        adjacency_list = create_undirected_graph(relations, number_of_vertices)\n        print(' '.join([str(len(value)) for value in adjacency_list.values()]))\n","repo_name":"lucper/rosalind","sub_path":"deg/DegreeArray.py","file_name":"DegreeArray.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"30635624094","text":"from multiprocessing import Process\nimport time\nfrom itertools import count\n\nSELECTS = [\"TOP-LEFT-SELECT\", \"TOP-RIGHT-SELECT\", \"BOTTOM-LEFT-SELECT\", \"BOTTOM-RIGHT-SELECT\"]\nMINUS = \"CENTRE-MINUS\"\nPOWER = \"POWER-BUTTON\"\nPOWER_TIMEOUT = 20\nSELECT_TIMEOUT = 10\n\n\nclass FulgorMilanoCooktop:\n\n    def __init__(self):\n        self.select_controls = SELECTS\n        self.minus_control = MINUS\n        self.power = POWER\n        self.is_locked = False\n        self.is_temp_locked = 
False\n self.is_on = False\n self.displays = [None] * 4\n\n def print_display(self, error=False):\n if self.is_on:\n if self.is_locked:\n self.displays = ['L'] * 4\n else:\n self.displays = ['0'] * 4\n if self.is_locked and not self.is_temp_locked:\n self.displays = ['0'] * 4\n if error:\n self.displays = ['E', 'R', '0', '3']\n\n print(\"***********\")\n print(self.displays[0], \" S-\", \" S-\", self.displays[1])\n print(' - ')\n print(self.displays[2], \" S-\", \" S-\", self.displays[3])\n print(\"***********\")\n\n def hold_buttons(self, buttons, interval=1):\n timeout_start = time.time()\n print(\"Operate {} buttons for {} seconds\".format(\" AND \".join(buttons), interval))\n counter = count(1)\n\n while self.is_on and time.time() < timeout_start + interval:\n print(\"Holding for {} seconds\".format(next(counter)))\n time.sleep(1)\n print(\"\\n\")\n return 0\n\n def timeout_helper(self, process, p_name):\n p1 = Process(target=process, args=(self,), name=p_name)\n p1.start()\n p1.join(timeout=min(SELECT_TIMEOUT, POWER_TIMEOUT))\n p1.terminate()\n return p1\n\n def turn_on(self):\n print(\"Turn On Stove!\")\n if not self.is_on:\n self.hold_buttons([self.power])\n if not self.is_locked:\n self.displays = [0] * 4\n self.is_on = True\n self.print_display()\n\n def lock(self, lock_operations):\n self.turn_on()\n if self.is_on:\n p1 = self.timeout_helper(lock_operations, \"Lock\")\n if p1.exitcode is None:\n self.print_display(error=True)\n print(\n \"Timeout Error: The above steps have been carried out exceeding {} seconds\".format(SELECT_TIMEOUT))\n else:\n self.is_locked = True\n self.is_temp_locked = True\n self.print_display()\n print(\"Successfully Locked!\")\n\n def unlock_temp(self, unlock_operations):\n self.turn_on()\n if self.is_on and self.is_locked:\n p1 = self.timeout_helper(unlock_operations, \"Unlocking Temp\")\n if p1.exitcode is None:\n self.print_display()\n print(\n \"Timeout Error: The above steps have been carried out exceeding {} seconds\".format(SELECT_TIMEOUT))\n else:\n self.is_temp_locked = False\n self.print_display(error=True)\n print(\"Successfully Unlocked temporality!\")\n self.is_locked = True\n self.is_temp_locked = True\n\n def unlock_perm(self, unlock_operations):\n self.turn_on()\n if self.is_on and self.is_locked:\n p1 = self.timeout_helper(unlock_operations, \"Unlocking\")\n if p1.exitcode is None:\n self.print_display(error=True)\n print(\n \"Timeout Error: The above steps have been carried out exceeding {} seconds\".format(SELECT_TIMEOUT))\n else:\n self.is_temp_locked = False\n self.is_locked = False\n self.print_display()\n print(\"Successfully Unlocked Permanently!\")\n\n\ndef operate_lock(cooktop):\n cooktop.hold_buttons([cooktop.select_controls[3], cooktop.minus_control], 2)\n cooktop.hold_buttons([cooktop.select_controls[3]])\n\n\ndef operate_unlock_temp(cooktop):\n cooktop.hold_buttons([cooktop.select_controls[3], cooktop.minus_control], 2)\n\n\ndef operate_unlock_perm(cooktop):\n cooktop.hold_buttons([cooktop.select_controls[3], cooktop.minus_control], 2)\n cooktop.hold_buttons([cooktop.minus_control])\n\n\nif __name__ == '__main__':\n my_cooktop = FulgorMilanoCooktop()\n print(\"Simulation for locking the stove:\")\n my_cooktop.lock(operate_lock)\n print(\"\\n\")\n print(\"Simulation for unlocking the stove temporality for cooking\")\n my_cooktop.unlock_temp(operate_unlock_temp)\n print(\"If I turn on the stove, it is still locked\")\n my_cooktop.print_display()\n print(\"\\n\")\n print(\"Simulation for unlocking the stove permanently \")\n 
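# unlock_perm runs operate_unlock_perm through the same Process-based\n    # timeout_helper as the other operations: if the button holds exceed\n    # min(SELECT_TIMEOUT, POWER_TIMEOUT) seconds, the worker is terminated and\n    # the ER03 error display is shown; otherwise is_locked is cleared for good.\n    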
my_cooktop.unlock_perm(operate_unlock_perm)\n","repo_name":"guanjiew/daily_dose_of_code","sub_path":"22-05/22-05-07/Stove.py","file_name":"Stove.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74421314505","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 15 21:52:41 2021\n\n@author: feng779\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors \nimport matplotlib.tri as tri\nfrom matplotlib.path import Path\nfrom matplotlib.patches import PathPatch\nimport gdal\nimport time\nimport os\n\nimport tilesMODIS\n\nimport sys\nsys.path.append('../../utils') \nfrom myearth import findNearset1D, readShpPointLine, readShpPoly\n\nimport pdb\n\nclass MODIS_tools(object):\n \"\"\"\n general class for processing MODIS data\n \"\"\"\n\n def __init__(self, tiff_filepath, **kwargs):\n self.__dict__.update(kwargs)\n \n self.tiff_filepath = tiff_filepath\n \n if len(self.tiff_filepath) == 1:\n self.lonc, self.latc, self.data = self.readTile(self.tiff_filepath[0])\n elif len(self.tiff_filepath) == 2:\n self.lonc, self.latc, self.data = self.readMultiTile(self.tiff_filepath)\n else:\n print ('No option available, reduce the number of tiles!')\n \n\n def readTile(self, tilename, subset=True):\n \"\"\"\n tilename : a single tile \n \"\"\"\n \n tf = gdal.Open(tilename)\n data = tf.GetRasterBand(1).ReadAsArray()\n print(\"Size:\", data.shape)\n \n # get flood water only \n # 3 : Water detected, beyond reference water, so is likely flood.\n data[data<3] = 0\n \n # Getting the GeoTIFF extent\n geoTransform = tf.GetGeoTransform()\n min_lon = int( round(geoTransform[0]) )\n max_lon = int( round(min_lon + geoTransform[1] * tf.RasterXSize) )\n max_lat = int( round(geoTransform[3]) )\n min_lat = int( round(max_lat + geoTransform[5] * tf.RasterYSize) )\n \n lonc = np.zeros([tf.RasterXSize])\n latc = np.zeros([tf.RasterYSize])\n \n lonv = np.zeros([tf.RasterXSize+1])\n latv = np.zeros([tf.RasterYSize+1])\n \n for i in range(tf.RasterXSize):\n lonc[i] = min_lon + (0.5+i) * (max_lon-min_lon)/tf.RasterXSize\n for i in range(tf.RasterYSize):\n latc[i] = min_lat + (0.5+i) * (max_lat-min_lat)/tf.RasterYSize\n for i in range(tf.RasterXSize+1):\n lonv[i] = min_lon + i * (max_lon-min_lon)/tf.RasterXSize\n for i in range(tf.RasterYSize+1):\n latv[i] = min_lat + i * (max_lat-min_lat)/tf.RasterYSize\n \n # need to flip the raster upside down\n data = np.flipud( data )\n \n if subset:\n print ('subsetting the orignal Tile')\n lonc, latc, data = self.subsetTile(lonc, latc, data)\n \n \n lonc, latc = np.meshgrid(lonc, latc) \n #pdb.set_trace()\n return lonc.ravel(), latc.ravel(), data.ravel()\n \n def readMultiTile(self, tilelist, subset=True):\n \n lonc1, latc1, data1 = self.readTile(tilelist[0], subset=subset)\n lonc2, latc2, data2 = self.readTile(tilelist[1], subset=subset)\n \n lonc = np.concatenate([lonc1.ravel(),lonc2.ravel()])\n latc = np.concatenate([latc1.ravel(),latc2.ravel()])\n data = np.concatenate([data1.ravel(),data2.ravel()])\n \n return lonc, latc, data\n \n \n def subsetTile(self, lonc, latc, data):\n \"\"\"\n subset a tile for fast processing\n \"\"\"\n ## bbox of the subset region\n bbox = [-80.58, -74.3, 36.65, 43]\n ind_lon0 = findNearset1D(bbox[0], lonc)\n ind_lon1 = findNearset1D(bbox[1], lonc)\n ind_lat0 = findNearset1D(bbox[2], latc)\n ind_lat1 = findNearset1D(bbox[3], latc)\n \n return lonc[ind_lon0:ind_lon1+1], 
latc[ind_lat0:ind_lat1+1], \\\n data[ind_lat0:ind_lat1+1, ind_lon0:ind_lon1+1]\n \n \n \n def tricontour(self, figname):\n \"\"\"\n generate the contour plot\n the trigrid configuration is for two tiles\n \"\"\"\n \n \n c_nodata = '#ffffff'\n c1 = 'b'\n c2 = '#ffed66'\n c3 = '#ff5959'\n #cmap = matplotlib.colors.ListedColormap([c_nodata, c1, c2, c3])\n #bounds = [0, 1, 2, 3]\n cmap = matplotlib.colors.ListedColormap([c_nodata, c1, c3])\n bounds = [0, 1, 3]\n \n \n plt.rcParams.update({'font.size': 18}) \n #fig = plt.figure(figsize=(18,10))\n fig = plt.figure(figsize=(12,10))\n ax = fig.add_subplot(111)\n #cs = ax.contourf(self.lonc, self.latc, data, bounds, cmap=cmap, vmin=bounds[0], vmax=bounds[-1])\n \n ## tricontour\n triang = tri.Triangulation(self.lonc, self.latc) \n cs = ax.tricontourf(triang, self.data, bounds, cmap=cmap, vmin=bounds[0], vmax=bounds[-1], extend='max')\n \n self.topo_lines(ax)\n \n #clip_shp = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/ICoM_domain/srb_reprojected/srb_reprojected.shp'\n clip_shp = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/ICoM_domain/Chesapeake_Bay_Watershed_Boundary/Chesapeake_Bay_Watershed_Boundary.shp'\n clip_shp2 = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/ICoM_domain/drbbnd_reprojected/drb_bnd_polygon_reproj.shp'\n #clip = self.shp_clip(ax, clip_shp)\n clip = self.shp_clip2(ax, clip_shp, clip_shp2)\n for contour in cs.collections:\n contour.set_clip_path(clip)\n \n self.multi_river_basin(ax)\n \n ax.set_xlim([-82, -72])\n ax.set_ylim([36, 44])\n ax.set_xlabel('Longitude')\n ax.set_ylabel('Latitude')\n ax.set_aspect('equal')\n fig.tight_layout()\n plt.savefig(figname)\n plt.close()\n \n # from mpl_toolkits.axes_grid1 import make_axes_locatable\n # divider = make_axes_locatable(ax)\n # cax = divider.append_axes(\"right\", size=\"3%\", pad=0.05)\n # cb = fig.colorbar(cs, cax=cax, orientation='vertical')\n # cb.ax.tick_params(labelsize=12)\n # cb.ax.yaxis.offsetText.set_fontsize(12)\n # cb.set_label('Flood water', fontsize=14)\n \n \n def topo_lines(self, ax):\n \"\"\"\n function that reads topology shapefiles and overlay on the figure\n \"\"\"\n\n shpfile = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/boundary_lines/ne_10m_coastline/ne_10m_coastline.shp'\n XY, field = readShpPointLine(shpfile)\n \n for line in XY:\n X = line[:,0]\n Y = line[:,1]\n ax.plot(X, Y, '-k', linewidth=0.1)\n \n def shp_clip(self, ax, clip_shp):\n \"\"\"\n create clip based on a shapefile \n \"\"\"\n #shp = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/ICoM_domain/srb/srb.shp'\n ## reprojected in QGIS, set coordinate system to WGS84 EPSG(4326)\n ## use reproject layer tool\n ## https://gis.stackexchange.com/questions/35590/reprojecting-vector-layer-in-qgis\n \n XY, field = readShpPoly(clip_shp)\n \n for line in XY:\n X = line[:,0]\n Y = line[:,1]\n ax.plot(X, Y, '-k', linewidth=0.5)\n \n vertices = np.asarray(XY[0])\n codes = [Path.MOVETO] + [Path.LINETO]*(vertices.shape[0]-2) + [Path.CLOSEPOLY]\n \n clip = Path(vertices, codes)\n clip = PathPatch(clip, transform=ax.transData)\n #clip = PathPatch(clip, facecolor = 'white')\n #pdb.set_trace()\n return clip\n \n def shp_clip2(self, ax, clip_shp, clip_shp2):\n \"\"\"\n create clip based on multi shapefile \n \"\"\"\n XY1, field1 = readShpPoly(clip_shp)\n XY2, field2 = readShpPoly(clip_shp2)\n \n XY = [XY1[0], XY2[0]]\n \n X = XY1[0][:,0]\n Y = XY1[0][:,1]\n ax.plot(X, Y, '-k', linewidth=1.0)\n \n X = XY2[0][:,0]\n Y = XY2[0][:,1]\n ax.plot(X, Y, '-k', linewidth=1.0)\n \n vertices = []\n 
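# Build one compound Path from both polygons: each ring contributes a MOVETO\n        # for its first vertex, LINETOs for the interior vertices and a closing\n        # CLOSEPOLY, so a single PathPatch can clip the contours with both basins.\n        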
codes = []\n for i in range(2):\n for j in range(len(XY[i])):\n vertices.append((XY[i][j][0], XY[i][j][1]))\n #pdb.set_trace()\n codes += [Path.MOVETO]\n codes += [Path.LINETO] * (len(XY[i]) -2)\n codes += [Path.CLOSEPOLY]\n \n #vertices = np.asarray(XY[0])\n #codes = [Path.MOVETO] + [Path.LINETO]*(vertices.shape[0]-2) + [Path.CLOSEPOLY]\n \n clip = Path(vertices, codes)\n clip = PathPatch(clip, transform=ax.transData)\n #clip = PathPatch(clip, facecolor = 'white')\n #pdb.set_trace()\n return clip\n \n def multi_river_basin(self, ax):\n \"\"\"\n multiple river basin boundaries\n \"\"\"\n \n shp_delaware = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/ICoM_domain/drbbnd_reprojected/drb_bnd_polygon_reproj.shp'\n shp_susquehanna = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/ICoM_domain/srb_reprojected/srb_reprojected.shp'\n shp_potomac = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/chesapeake-bay-data/Potomac_river_basin/Potomac_river_basin.shp'\n shp_patuxent = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/chesapeake-bay-data/Patuxent_river_basin/Patuxent_river_basin.shp'\n shp_choptank = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/chesapeake-bay-data/Choptank_river_basin/Choptank_river_basin.shp'\n shp_rappahannock = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/chesapeake-bay-data/Rappahannock_river_basin/Rappahannock_river_basin.shp'\n shp_mattaponi_pamunkey = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/chesapeake-bay-data/Mattaponi_Pamunkey_river_basin/Mattaponi_Pamunkey_river_basin.shp'\n shp_james_appomattox = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/chesapeake-bay-data/James_river_basin/James_river_basin.shp'\n \n shp_susquehanna_bay = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/ICoM_domain/clipped_bay_boundary/Chesapeake_bay_poly.shp'\n shp_delaware_bay = '/Users/feng779/OneDrive - PNNL/Documents/DATA/SHP/ICoM_domain/clipped_bay_boundary/Delaware_bay_poly.shp'\n \n self.basin_bound(ax, shp_susquehanna)\n self.basin_bound(ax, shp_potomac)\n self.basin_bound(ax, shp_patuxent)\n self.basin_bound(ax, shp_choptank)\n self.basin_bound(ax, shp_rappahannock)\n self.basin_bound(ax, shp_mattaponi_pamunkey)\n self.basin_bound(ax, shp_james_appomattox)\n \n self.basin_bound(ax, shp_susquehanna_bay)\n self.basin_bound(ax, shp_delaware_bay)\n \n \n def basin_bound(self, ax, shpfile):\n \n XY, field = readShpPoly(shpfile)\n \n for line in XY:\n X = line[:,0]\n Y = line[:,1]\n ax.plot(X, Y, '-k', linewidth=0.3)\n \n\n\ndef plotTile(lonc, latc, lonv, latv, data):\n \"\"\"\n imshow\n \"\"\"\n import cartopy, cartopy.crs as ccrs \n import matplotlib.colors \n \n extent = [lonv.min(), latv.min(), lonv.max(), latv.max()]\n #pdb.set_trace()\n fig = plt.figure(figsize=(12, 12.5)) \n ax = plt.axes([0.05, 0.05, 0.9, 0.9], projection=ccrs.PlateCarree())\n img_extent = [extent[0], extent[2], extent[1], extent[3]]\n ax.set_extent([extent[0], extent[2], extent[1], extent[3]], ccrs.PlateCarree())\n \n c_nodata = '#ffffff'\n c2 = '#4679fa'\n c20 = '#46befa'\n c40 = '#61fa46'\n c60 = '#e2fa46'\n c80 = '#faa346'\n c100 = '#fa4c46'\n cmap = matplotlib.colors.ListedColormap([c_nodata, c2, c20, c40, c60, c80, c100])\n boundaries = [0, 2, 20, 40, 60, 80, 100]\n\n print(\"Plotting the Map Elements...\")\n #vmin = 0\n #vmax = 100\n norm = matplotlib.colors.BoundaryNorm(boundaries, cmap.N, clip=True)\n img1 = ax.imshow(data, origin='upper', extent=img_extent, cmap=cmap, norm=norm, zorder=1)\n \n return img1\n \n\n\ndef plotTile_patches(lonc, latc, lonv, latv, 
data):\n \"\"\"\n plot the tile using matplotlib patcheCollection\n \"\"\"\n from matplotlib.patches import Polygon\n from matplotlib.collections import PatchCollection\n \n # need to flip the raster upside down\n data = np.flipud( data )\n \n ## delta for calculating cell vertices from cell center\n dlon = lonc[0] - lonc[1] / 2.\n dlat = latc[0] - latc[1] / 2.\n \n #lonp = np.zeros([len(lon), 4])\n #latp = np.zeros([len(lat), 4])\n patches = []\n for i, llon in enumerate(lonc):\n for j, llat in enumerate(latc):\n lon_w = llon - dlon\n lon_e = llon + dlon\n lat_n = llat + dlat\n lat_s = llat - dlat \n ## counter-clockwise vertices for each cell\n xp = np.asarray([lon_w, lon_e, lon_e, lon_w])\n yp = np.asarray([lat_s, lat_n, lat_n, lat_s])\n patches.append(Polygon(np.vstack([xp, yp]).T))\n \n pdb.set_trace()\n cmap = 'viridis'\n pc = PatchCollection(patches, cmap=cmap)\n pc.set_array(data.flatten())\n pc.set_lw(0.1)\n \n plt.rcParams.update({'font.size': 18})\n fig = plt.figure(figsize=(12,6))\n ax = fig.add_subplot(111)\n ax.add_collection(pc)\n ax.set_xlim([lonv.min(), lonv.max()])\n ax.set_ylim([latv.min(), latv.max()])\n ax.set_aspect('equal')\n plt.show()\n \n \n \n #pdb.set_trace()\ndef plotTile_pcolor(lonc, latc, lonv, latv, data):\n \"\"\"\n make plots using the pcolor function\n \"\"\"\n \n # need to flip the raster upside down\n data = np.flipud( data )\n \n xv, yv = np.meshgrid(lonv, latv) \n plt.rcParams.update({'font.size': 18})\n fig = plt.figure(figsize=(12,6))\n ax = fig.add_subplot(111)\n ax.pcolor(xv, yv, data, cmap='RdBu')\n ax.set_aspect('equal')\n plt.show()\n \n \n \n \nif __name__ == \"__main__\":\n \n\n from datetime import datetime, timedelta\n #starttime = '2012-10-22'\n #endtime = '2012-10-28'\n starttime = '2012-10-22'\n endtime = '2012-11-13'\n starttime = datetime.strptime(starttime, '%Y-%m-%d')\n endtime = datetime.strptime(endtime, '%Y-%m-%d')\n times_data = [starttime + timedelta(days=x) for x in range(0, (endtime-starttime).days+1)]\n ddir = '/Users/feng779/OneDrive - PNNL/Documents/DATA/MODIS'\n for timei in times_data:\n print (timei)\n yday = timei.timetuple().tm_yday\n filepath1 = os.path.join(ddir, '080W050N/MWP_{}{:03d}_080W050N_2D2OT.tif'.format(timei.year, yday) )\n filepath2 = os.path.join(ddir, '080W040N/MWP_{}{:03d}_080W040N_2D2OT.tif'.format(timei.year, yday) )\n if os.path.exists(filepath1) and os.path.exists(filepath2):\n tiff_filepath = [filepath1, filepath2]\n MT = MODIS_tools(tiff_filepath)\n figname = '/Users/feng779/OneDrive - PNNL/Documents/CODE/DATA/MODIS/figures/{}.png'.format(datetime.strftime(timei, '%Y%m%d'))\n MT.tricontour(figname)\n #pdb.set_trace()\n \n ","repo_name":"fdongyu/DATA","sub_path":"MODIS/MODIS_tools.py","file_name":"MODIS_tools.py","file_ext":"py","file_size_in_byte":14857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8690778778","text":"# import random\n#\n# source = [x for x in range(10)]\n# random.shuffle(source)\n#\n# def bubblesort(x):\n# length = len(x)-1\n#\n# for i in range(length):\n# for j in range(length -i):\n# if x[j] > x[j+1]:\n# x[j], x[j+1] = x[j+1], x[j]\n# # print(x)\n#\n# # print(x)\n#\n# return x\n#\n# bubblesort(source)\n#\n# print(source)\n\ndef bubble_sort(data):\n data_len = len(data)\n\n for i in range(data_len - 1):\n for j in range(data_len -i -1):\n if data[j] > data[j+1]:\n data[j], data[j+1] = data[j+1], data[j]\n\nif __name__ == \"__main__\":\n li = [2,3,5,2,3,8,6,7,10,8,1,4]\n bubble_sort(li)\n 
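# bubble_sort mutates the list in place and returns None, so the sorted\n    # result is observed by printing li afterwards:\n    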
print(li)\n\n","repo_name":"YunsikPark/DataStructure_Algorithm","sub_path":"algorithm/sorting/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17964446347","text":"import argparse\nimport functools\nimport io\nimport json\n\nimport torch\nfrom aiohttp import web\nfrom PIL import Image\n\nfrom rotate_captcha_crack.common import device\nfrom rotate_captcha_crack.logging import RCCLogger\nfrom rotate_captcha_crack.model import RotNetR, WhereIsMyModel\nfrom rotate_captcha_crack.utils import process_captcha\n\nlogger = RCCLogger()\nroutes = web.RouteTableDef()\n\ndumps = functools.partial(json.dumps, separators=(',', ':'))\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--index\", \"-i\", type=int, default=-1, help=\"Use which index\")\nopts = parser.parse_args()\n\nmodel = RotNetR(cls_num=180, train=False)\nmodel_path = WhereIsMyModel(model).with_index(opts.index).model_dir / \"best.pth\"\nmodel.load_state_dict(torch.load(str(model_path)))\nmodel = model.to(device=device)\nmodel.eval()\n\n\n@routes.post('/')\nasync def hello(request: web.Request):\n    resp = {'err': {'code': 0, 'msg': 'success'}}\n\n    try:\n        multipart = await request.multipart()\n        img_part = await multipart.next()\n        img_bytes = await img_part.read()\n        img = Image.open(io.BytesIO(img_bytes))\n\n        with torch.no_grad():\n            img_ts = process_captcha(img)\n            img_ts = img_ts.to(device=device)\n            predict = model.predict(img_ts) * 360\n            resp['pred'] = predict\n\n    except Exception as err:\n        resp['err']['code'] = 0x0001\n        resp['err']['msg'] = str(err)\n        return web.json_response(resp, status=400, dumps=dumps)\n\n    return web.json_response(resp, dumps=dumps)\n\n\napp = web.Application()\napp.add_routes(routes)\nweb.run_app(app, port=4396, access_log_format='%a \"%r\" %s %b', access_log=logger)\n","repo_name":"Starry-OvO/rotate-captcha-crack","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"81"}
{"seq_id":"28200218976","text":"# -*- coding: utf-8 -*-\n\ndef factorial(numero):\n    if(not isinstance(numero, int) or numero < 0):\n        print(\"Invalid number\")\n    elif(numero == 0):\n        return 1\n    else:\n        i = 1\n        acumulado = 1\n        while(i <= numero):\n            acumulado = acumulado * i\n            i = i + 1\n        return acumulado\n\ndef combinacion(n,k):\n    if(k > n or not isinstance(k, int) or not isinstance(n, int) or k < 0 or n < 1):\n        print(\"Invalid number\")\n    else:\n        return factorial(n)/(factorial(k)*factorial(n - k))\n\nprint(combinacion(400,270))\n","repo_name":"david20lpez/Complexity","sub_path":"Taller 1/combinacion.py","file_name":"combinacion.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"32961239294","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 20 08:45:29 2020\n\n@author: Imre Antalfy\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n###\n# functions\n\ndef MakeSubset(data,start,end):\n    \"\"\" If a read file contains multiple datasets, make subsets\n    returns subset\n    \"\"\"\n    subset = list(range(start,end))\n    result = data.filter(items= subset, axis=0)\n    return result\n    \ndef FillEmpties_Convert(df,colname,dtype):\n    \"\"\" takes a column as input\n    searches for \"-\" values, replaces with NaN, 
forward-fills NaN\n    reworks the datatype of the column\n    \"\"\"\n    df.loc[df[colname] == '-',colname] = np.NaN    \n    df[colname].fillna(method='pad', inplace=True)\n    df[colname] = df[colname].astype(dtype)\n    \n    \n###\n# Main call\n    \n# read all data and merge into two dataframes, on a daily and an hourly basis\nwae = pd.read_csv('order_74784_data.csv', sep=';')\n\n# look at the data\nwae.shape  # look at shape\n\n# del WAE, not needed\nwae = wae.drop(columns=\"stn\")\n\n# CAREFUL!!! from row 339509 a new dataset begins\n# Extract first subset\n# rename columns\ncolnames1 = [\"time\",\"AirTemp\",\"WindSpeed\",\"WindDir\"]\nwae_hourly = MakeSubset(wae,0,339509)\nwae_hourly.columns = colnames1\n\n# good cut?\nwae_hourly.tail(3)\n\nwae_daily = MakeSubset(wae,339510,353656)\ncolnames2 = [\"time\", \"AirTemp_Dmax\", \"AirTemp_Dmin\", \"AirTemp_Dmean\"]\nwae_daily.columns = colnames2\n\n### wae_daily and wae_hourly contain different datasets\nwae_daily.dtypes\nwae_hourly.dtypes\n\n# the datatypes are wrong\n# rework daily first\nwae_daily['time'] = pd.to_datetime(wae_daily['time'])\nwae_daily['AirTemp_Dmax'] = wae_daily['AirTemp_Dmax'].astype(float)\nwae_daily['AirTemp_Dmin'] = wae_daily['AirTemp_Dmin'].astype(float)\nwae_daily['AirTemp_Dmean'] = wae_daily['AirTemp_Dmean'].astype(float)\n\n# correct?\nwae_daily.dtypes\n\n# time based indexing\nwae_daily = wae_daily.set_index('time')\nwae_daily.head(3)\n\n# same procedure for the hourly data\nwae_hourly['time'] = wae_hourly['time'].astype(str)\nwae_hourly['time'] = pd.to_datetime(wae_hourly['time'],format='%Y%m%d%H')\n\nwae_hourly = wae_hourly.set_index('time')\n\n### Handling missing data, forward fill, convert\nFillEmpties_Convert(wae_hourly,'AirTemp',float)\nFillEmpties_Convert(wae_hourly,'WindSpeed',float)\nFillEmpties_Convert(wae_hourly,'WindDir',int)\n\n#####################################################\n# order_74785, hourly data\nwae = pd.read_csv('order_74785_data.csv', sep=';')\nwae = wae.drop(columns=\"stn\")\n\nwae.dtypes\n\nwae['time'] = pd.to_datetime(wae['time'],format='%Y%m%d%H')\nwae = wae.set_index('time')\nwae.head(3)\nwae.columns = [\"SoilTemp_Hmean\",\"GlobRad_Hmean\",\"Rain_Hsum\",\"RelHumid_Hmean\",\"Press_Hmean\"]\n\nFillEmpties_Convert(wae,'SoilTemp_Hmean',float)\nFillEmpties_Convert(wae,'GlobRad_Hmean',int)\nFillEmpties_Convert(wae,'Rain_Hsum',float)\nFillEmpties_Convert(wae,'RelHumid_Hmean',float)\nFillEmpties_Convert(wae,'Press_Hmean',float)\n\n# this dataset is hourly based, append it to wae_hourly\nwae_hourly = pd.merge(wae_hourly, wae, on='time',how='outer')\n\n#####################################################\n# order_74831, daily data\nwae = pd.read_csv('order_74831_data.csv', sep=';')\nwae = wae.drop(columns=\"stn\")\n\nwae.dtypes\n# this data is correctly assigned; also, no missing values\n\n# at closer inspection, this datafile was the same as the second dataframe\n# appearing halfway into order 74784. 
This is AirTemp_Dmax and AirTemp_Dmin\n# => this data has not been taken into account, as it was already present\n\n#####################################################\n# order_75248, daily data\n\n# this order contains AirTemp_Dmean, which is already accounted for in\n# order 74784\n\n#####################################################\n### Plotting\n# based on the tutorial, let's make some data visible\n\n# Use seaborn style defaults and set the default figure size\nsns.set(rc={'figure.figsize':(11, 4)})\n\n# some first test plots\nwae_daily['AirTemp_Dmean'].plot(linewidth=0.5);\nwae_hourly['WindSpeed'].plot(linewidth=0.5);\n\n# let's see how the temperature, radiation and rain changed over the years\ncols_plot = ['AirTemp', 'GlobRad_Hmean', 'Rain_Hsum']\naxes = wae_hourly[cols_plot].plot(marker='.', alpha=0.5, linestyle='None', figsize=(11, 9), subplots=True)\nfor ax in axes:\n    ax.set_ylabel('Hourly Totals')\n\n# conclusion:\n# there is way too much ink with the hourly data. But there is clearly\n# periodicity. Resampling is surely needed, but let's take a look at the periodicity first\n \n# periodicity\nax = wae_hourly.loc['2017', 'AirTemp'].plot()\nax.set_ylabel('Hourly temperature (°C)');\n# this plot still has a lot of ink, although it shows the periodicity of the\n# temp during the year nicely\n\nax = wae_hourly.loc['2017', 'GlobRad_Hmean'].plot()\nax.set_ylabel('Radiation (W/m^2)');\n# this plot is not that useful, as a lot of the values are 0.\n# maybe a rolling window?\n\nax = wae_hourly.loc['2017', 'Rain_Hsum'].plot()\nax.set_ylabel('Rainfall hourly sum (mm)');\n# same story as with the global radiation\n\n###\n# For the temperature, a boxplot is also quite nice\n# Boxplot for min/max/mean temperature on a daily basis\n# for this, we need to first group the data by month\nwae_daily['Month'] = wae_daily.index.month\n\nfig, axes = plt.subplots(3, 1, figsize=(11, 10), sharex=True)\nfor name, ax in zip(['AirTemp_Dmax', 'AirTemp_Dmin', 'AirTemp_Dmean'], axes):\n    sns.boxplot(data=wae_daily, x='Month', y=name, ax=ax)\n    ax.set_ylabel('°C')\n    ax.set_title(name)\n\n###\n# resampling\n# instead of having hourly temperature data, weekly temperature data would\n# make plots less crowded\ndata_columns = ['AirTemp', 'GlobRad_Hmean', 'Rain_Hsum']\n# Resample to weekly frequency, aggregating with mean\nwae_hourly_mean = wae_hourly[data_columns].resample('W').mean()\nwae_hourly_mean.head(3)\n\n# compare hourly and weekly data for 6 months\n# Start and end of the date range to extract\nstart, end = '2017-01', '2017-06'\n# Plot daily and weekly resampled time series together\nfig, ax = plt.subplots()\nax.plot(wae_hourly.loc[start:end, 'GlobRad_Hmean'],\nmarker='.', linestyle='-', linewidth=0.5, label='Hourly')\nax.plot(wae_hourly_mean.loc[start:end, 'GlobRad_Hmean'],\nmarker='o', markersize=8, linestyle='-', label='Weekly Mean Resample')\nax.set_ylabel('Global Radiation (W/m^2)')\nax.legend();\n\n# for the global radiation, this smooths the data and makes the structure\n# much easier to see by eye. 
\n\n###\n# rolling windows\n# for rain, could a rolling mean provide better visuals?\n\n# Compute the centered 7-day rolling mean\nwae_hourly_7d = wae_hourly[data_columns].rolling(7, center=True).mean()\nwae_hourly_7d.head(10)\n# Start and end of the date range to extract\nstart, end = '2017-01', '2017-06'\n# Plot daily, weekly resampled, and 7-day rolling mean time series together\nfig, ax = plt.subplots()\nax.plot(wae_hourly.loc[start:end, 'Rain_Hsum'],\nmarker='.', linestyle='-', linewidth=0.5, label='Daily')\n\nax.plot(wae_hourly_mean.loc[start:end, 'Rain_Hsum'],\nmarker='o', markersize=8, linestyle='-', label='Weekly Mean Resample')\n\nax.plot(wae_hourly_7d.loc[start:end, 'Rain_Hsum'],\nmarker='.', linestyle='-', label='7-d Rolling Mean')\nax.set_ylabel('Rainfall hourly sum (mm)')\nax.legend();\n\n# conclusion\n# a 7-day window is too small; the data is still not clearly visible\n# in comparison to the weekly resample\n\n############################################\n### Climate change discussion\n# did it really get warmer?\n# compare the temperature over the first three years to the last three\n# years in the dataset\ndata_columns = ['AirTemp_Dmax', 'AirTemp_Dmin', 'AirTemp_Dmean']\n\nwae_daily_7d = wae_daily[data_columns].rolling(30, center=True).mean()\nwae_daily_7d.head(10)\n\nwae_daily_365d = wae_daily[data_columns].rolling(window=365, center=True, min_periods=360).mean()\n\nstart1, end1 = '1981', '1983'\nstart2, end2 = '2017', '2019'\n\nfig, ax = plt.subplots()\nax.plot(wae_daily_365d.loc[start1:end1, 'AirTemp_Dmax'],\nmarker='.', linestyle='-', linewidth=0.5, label='Daily')\nax.set_ylabel('Temperature (°C)')\nax.set_title('Mean 365d-rolling temperature over 1981-1983 (°C)');\n\nfig, ax = plt.subplots()\nax.plot(wae_daily_365d.loc[start2:end2, 'AirTemp_Dmax'],\nmarker='.', linestyle='-', linewidth=0.5, label='Daily')\nax.set_ylabel('Temperature (°C)')\nax.set_title('Mean 365d-rolling temperature over 2017-2019 (°C)');\n\n# conclusion:\n# yes, it gets warmer, but is this time period well enough defined?\n# the next step needs to be to merge both lines into one figure\n\n###\n# But did more extreme weather situations happen in this time period?\n# rain?\n# make a frequency plot (histogram), based on the weekly resample\nwae_hourly_mean.head(3)\n\nfig, ax = plt.subplots()\nn, bins, patches = ax.hist(wae_hourly_mean[\"Rain_Hsum\"][0:161], 30, density=1)\nax.set_xlabel('Rainfall hourly sum (mm)')\nax.set_ylabel('Probability density')\nax.set_title('Histogram of weekly rainfall from 1981 to 1983')\n\nfig, ax = plt.subplots()\nn, bins, patches = ax.hist(wae_hourly_mean[\"Rain_Hsum\"][1861:2022], 30, density=1)\nax.set_xlabel('Rainfall hourly sum (mm)')\nax.set_ylabel('Probability density')\nax.set_title('Histogram of weekly rainfall from 2017 to 2019')\n\n# or together\nfig, ax = plt.subplots()\nn, bins, patches = ax.hist(wae_hourly_mean[\"Rain_Hsum\"][0:161], 30, density=1)\nax.set_xlabel('Rainfall hourly sum (mm)')\nax.set_ylabel('Probability density')\nax.set_title('Histogram of weekly rainfall from 1981 to 1983')\n\n#fig, ax = plt.subplots()\nn, bins, patches = ax.hist(wae_hourly_mean[\"Rain_Hsum\"][1862:2022], 30, density=1)\nax.set_xlabel('Rainfall hourly sum (mm)')\nax.set_ylabel('Probability density')\nax.set_title('Histogram of weekly rainfall from 2017 to 
2019')","repo_name":"imre-antalfy/Track-2-python---VSA","sub_path":"waediWheater.py","file_name":"waediWheater.py","file_ext":"py","file_size_in_byte":9590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3946772954","text":"import itertools\r\nfrom collections import Counter\r\n\r\nclass Knuth:\r\n\r\n def __init__(self, size_of_fleet, routes, no_of_flights):\r\n self.route_combinations = []\r\n self.size_of_fleet = size_of_fleet\r\n self.routes = routes\r\n self.no_of_flights = no_of_flights\r\n self.exact_covers = []\r\n\r\n @staticmethod\r\n def flatten(list_of_routes):\r\n flat_list = []\r\n for route in list_of_routes:\r\n flat_list += route\r\n return flat_list\r\n\r\n def check_size(self, flat_list):\r\n if len(flat_list) == self.no_of_flights:\r\n return True\r\n else:\r\n return False\r\n\r\n @staticmethod\r\n def check_unique_flights(flat_list):\r\n counter = Counter(flat_list)\r\n for values in counter.values():\r\n if values > 1:\r\n return False\r\n return True\r\n\r\n def generate_exact_cover(self):\r\n self.route_combinations = itertools.combinations(self.routes, self.size_of_fleet)\r\n for comb in self.route_combinations:\r\n flat_list = self.flatten(comb)\r\n if self.check_size(flat_list) and self.check_unique_flights(flat_list):\r\n self.exact_covers.append(comb)\r\n","repo_name":"rudyszymczok/TAP","sub_path":"Knuth.py","file_name":"Knuth.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38544398775","text":"from typing import Optional, Text, Tuple, Union\n\nfrom causalimpact import indices\nfrom causalimpact import standardize\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\n\nclass CausalImpactData:\n \"\"\"Class for storing and preparing data for modeling.\n\n This class handles all of the data-related functions of CausalImpact. It\n makes sure the input data is given in an appropriate format (as a pandas\n DataFrame or convertible to pd.DataFrame), checks that the given\n outcome/feature column names exist, and splits the data into the pre-period\n and post-period based on the given treatment start time OR tuples defining\n the start/end points of the pre/post periods.\n\n If the pre- and post-periods do not cover the entire timespan of\n the data, the excluded portions will be used for plotting but NOT for\n fitting the model or calculating impact statistics.\n\n Note that the default is to standardize the pre-period data to have mean zero\n and standard deviation one. The standardization is done for each column\n separately, and then applied to the post-period data using each column's pre-\n period mean and standard deviation. The mean and standard deviation for the\n outcome column are stored so that the inferences can be back-transformed to\n the original scale.\n\n\n Attributes:\n data: Pandas DataFrame of timeseries data.\n pre_period: Start and end value in data.index for pre-intervention.\n post_period: Start and end value in data.index for post-intervention.\n outcome_column: Timeseries being modeled. Defaults to first column of\n `data`.\n feature_columns: Subset of data.columns used as covariates. `None` in case\n there are no covariates. 
Defaults to all non-outcome columns (or `None` if\n there are none).\n standardize_data: Boolean: Whether covariates and outcome were scaled to\n have 0 mean and 1 standard deviation.\n pre_data: Subset of `data` from `pre_period`. This is unscaled.\n after_pre_data: Subset of `data` from after the `pre_period`. The time\n between pre-period and post-period should still be forecasted to make\n accurate post-period predictions. Additionally, users are interested in\n after post-period predictions. This is unscaled.\n num_steps_forecast: Number of elements (including NaN) to forecast for,\n including the post-period and time between pre-period and post-period.\n model_pre_data: Scaled subset of `data` from `pre_period` used for fitting\n the model.\n model_after_pre_data: Scaled subset of `data` from `post_period` used for\n fitting the model.\n outcome_scaler: A `standardize.Scaler` object used to transform outcome\n data.\n feature_ts: A pd.DataFrame of the scaled data over just the feature columns.\n outcome_ts: A tfp.sts.MaskedTimeSeries instance of the outcome data from the\n `pre_period`.\n \"\"\"\n\n def __init__(self,\n data: Union[pd.DataFrame, pd.Series],\n pre_period: Tuple[indices.InputDateType, indices.InputDateType],\n post_period: Tuple[indices.InputDateType, indices.InputDateType],\n outcome_column: Optional[Text] = None,\n standardize_data=True,\n dtype=tf.float32):\n \"\"\"Constructs a `CausalImpactData` instance.\n\n Args:\n data: Pandas `DataFrame` containing an outcome time series and optional\n feature time series.\n pre_period: Pre-period start and end (see InputDateType).\n post_period: Post-period start and end (see InputDateType).\n outcome_column: String giving the name of the outcome column in `data`. If\n not specified, the first column in `data` is used.\n standardize_data: If covariates and output should be standardized.\n dtype: The dtype to use throughout computation.\n \"\"\"\n # This is a no-op in case data is a pd.DataFrame. 
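For example, pd.DataFrame(pd.Series([1.0, 2.0])) becomes a\n # single-column frame, while a DataFrame input is left effectively unchanged. 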
It is common enough to\n # pass a pd.Series that this is useful here.\n data = pd.DataFrame(data)\n self.pre_period, self.post_period = indices.parse_and_validate_date_data(\n data=data, pre_period=pre_period, post_period=post_period)\n self.data, self.outcome_column, self.feature_columns = (\n _validate_data_and_columns(data, outcome_column))\n del data # To insure the unfiltered DataFrame is not used again.\n self.standardize_data = standardize_data\n self.pre_data = self.data.loc[(self.data.index >= self.pre_period[0])\n & (self.data.index <= self.pre_period[1])]\n # after_pre_data intentionally includes everything after the end of the\n # pre-period since the time between pre- and post-period needs to be\n # accounted for and we actually want to see predictions after the post\n # period.\n self.after_pre_data = self.data.loc[self.data.index > self.pre_period[1]]\n self.num_steps_forecast = len(self.after_pre_data.index)\n\n if self.standardize_data:\n scaler = standardize.Scaler().fit(self.pre_data)\n self.outcome_scaler = standardize.Scaler().fit(\n self.pre_data[self.outcome_column])\n self.model_pre_data = scaler.transform(self.pre_data)\n self.model_after_pre_data = scaler.transform(self.after_pre_data)\n else:\n self.outcome_scaler = None\n self.model_pre_data = self.pre_data\n self.model_after_pre_data = self.after_pre_data\n\n out_ts = tf.convert_to_tensor(\n self.model_pre_data[self.outcome_column], dtype=dtype)\n self.outcome_ts = tfp.sts.MaskedTimeSeries(\n time_series=out_ts, is_missing=tf.math.is_nan(out_ts))\n if self.feature_columns is not None:\n # Here we have to use the FULL time series so that the post-period\n # feature data can be used for forecasting.\n features_pre = self.model_pre_data[self.feature_columns]\n features_post = self.model_after_pre_data[self.feature_columns]\n self.feature_ts = pd.concat([features_pre, features_post], axis=0)\n self.feature_ts[\"intercept_\"] = 1.\n else:\n self.feature_ts = None\n\n\ndef _validate_data_and_columns(data: pd.DataFrame,\n outcome_column: Optional[str]):\n \"\"\"Validates data and sets defaults for feature and outcome columns.\n\n By default, the first column of the dataframe will be used as the outcome,\n and the rest will be used as features, but these can instead be provided.\n\n Args:\n data: Input dataframe for analysis.\n outcome_column: Optional string to use for the outcome.\n\n Raises:\n KeyError: if `outcome_column` is not in the data.\n ValueError: if `outcome_column` is constant.\n\n Returns:\n The validated (possibly default) data, outcome column, and feature columns.\n \"\"\"\n\n # Check outcome column -- if not specified, default is the first column.\n if outcome_column is None:\n outcome_column = data.columns[0]\n if outcome_column not in data.columns:\n raise KeyError(f\"Specified `outcome_column` ({outcome_column}) not found \"\n f\"in data\")\n\n # Make sure outcome column is not constant\n if data[outcome_column].std(skipna=True, ddof=0) == 0:\n raise ValueError(\"Input response cannot be constant.\")\n\n # Feature columns are all those other than the output column. 
Use\n # `original_column_order` to keep track of the\n # original column order, since set(data.columns) reorders the\n # columns, which leads to problems later when subsetting and transforming.\n if data.shape[1] <= 1:\n feature_columns = None\n else:\n original_column_order = data.columns\n column_differences = set(data.columns).difference([outcome_column])\n feature_columns = [\n col for col in original_column_order if col in column_differences\n ]\n data = data[[outcome_column] + (feature_columns or [])]\n if data[outcome_column].count() < 3: # Series.count() is for non-NaN values.\n raise ValueError(\"Input data must have at least 3 observations.\")\n if data[feature_columns or []].isna().values.any():\n raise ValueError(\"Input data cannot have any missing values.\")\n if not data.dtypes.map(pd.api.types.is_numeric_dtype).all():\n raise ValueError(\"Input data must contain only numeric values.\")\n\n return data, outcome_column, feature_columns\n","repo_name":"google/tfp-causalimpact","sub_path":"causalimpact/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8167,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"20466270087","text":"import argparse\nimport collections\nimport json\nimport sys\n\nfrom tokenizer import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--tokenizer\", default=\"MorphoditaTokenizer()\", type=str, help=\"Tokenizer to use\")\nargs = parser.parse_args()\n\ntokenizer = eval(args.tokenizer)\n\nanchors, not_mapped, lens = 0, 0, []\nnot_mapped_dict = collections.defaultdict(lambda: [])\nlonger_dict = collections.defaultdict(lambda: 0)\n\nfor line in sys.stdin:\n data = json.loads(line)\n text = data[\"input\"]\n\n tokenized_starts, tokenized_ends = {}, {}\n for i, (start, end) in enumerate(tokenizer.tokenize(text)):\n tokenized_starts[start] = i\n tokenized_ends[end] = i\n\n froms, tos = collections.defaultdict(lambda: 0), collections.defaultdict(lambda: 0)\n for node in data[\"nodes\"]:\n if \"anchors\" in node:\n for anchor in node[\"anchors\"]:\n start, end = anchor[\"from\"], anchor[\"to\"]\n\n anchors += 1\n if start not in tokenized_starts or end not in tokenized_ends:\n not_mapped += 1\n not_mapped_dict[text[start:end]].append(text)\n else:\n difference = tokenized_ends[end] - tokenized_starts[start]\n while difference >= len(lens): lens.append(0)\n lens[difference] += 1\n if difference:\n token = []\n for i in range(start, end):\n if i in tokenized_starts: token.append(\"<\")\n token.append(text[i])\n if i + 1 in tokenized_ends: token.append(\">\")\n longer_dict[\"\".join(token)] += 1\n\nprint(\"Anchors\", anchors)\nprint(\"Missing\", not_mapped, \"{:.2f}%\".format(100 * not_mapped / anchors))\nprint(\"Lens\", *[\"{}: {} ({:.2f}%)\".format(i, value, 100 * value / anchors) for i, value in enumerate(lens)])\n\nfor not_mapped, examples in sorted(not_mapped_dict.items(), key=lambda kv: len(kv[1]), reverse=True)[:10]:\n print(not_mapped, len(examples), examples[0])\n\nfor longer, examples in sorted(longer_dict.items(), key=lambda kv: kv[1], reverse=True)[:10]:\n print(longer, examples)\n","repo_name":"ufal/mrpipe-conll2019","sub_path":"src/tokenizer_test.py","file_name":"tokenizer_test.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"20482563329","text":"# notmysol # TC O(n*m) where n is the size of s and m is the max size of the counter # SC O(m) m is the max size 
of the counter\n# take a look at this image here (explain the algo) : https://github.com/rtn75000/leetcode-pb/blob/main/438.%20Find%20All%20Anagrams%20in%20a%20String/image.md\nfrom collections import Counter\nclass Solution:\n def findAnagrams(self, s: str, p: str) -> List[int]:\n ns, np = len(s), len(p)\n if ns < np:\n return []\n\n p_count = Counter(p)\n s_count = Counter()\n \n output = []\n # sliding window on the string s\n for i in range(ns):\n # add one more letter \n # on the right side of the window\n s_count[s[i]] += 1\n # remove one letter \n # from the left side of the window\n if i >= np:\n if s_count[s[i - np]] == 1:\n del s_count[s[i - np]]\n else:\n s_count[s[i - np]] -= 1\n # compare array in the sliding window\n # with the reference array\n if p_count == s_count:\n output.append(i - np + 1)\n \n return output\n","repo_name":"rtn75000/leetcode-pb","sub_path":"438. Find All Anagrams in a String/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42593249587","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport argparse\nimport struct\nimport binascii\nimport multiprocessing\nimport time\n\nfrom multiprocessing import Process, Queue, Event, freeze_support\nfrom threading import Thread\nfrom queue import Empty, Full # for exception catching\n\nimport pymavlink.dialects.v10.lapwing as mavlink\nimport pymavlink.mavutil as mavutil\n\nmavutil.set_dialect(\"lapwing\")\n\nTOTAL_PIDS = 16\n\nclass AcquiredParam(object):\n def __init__(self, name, var):\n self.name = name\n self.var = var\n\n\nclass CommWorker(Thread):\n\n def __init__(self, to_pnc, from_pnc, gui, udpin, udpout):\n Thread.__init__(self)\n self.to_pnc = to_pnc\n self.from_pnc = from_pnc\n self.gui = gui\n self.udpin = udpin\n self.udpout = udpout\n self.__stop = Event()\n self.param_list = []\n for i in range(0, TOTAL_PIDS):\n s = ('PID_%02d_' % i)\n self.param_list.append(s + 'P')\n self.param_list.append(s + 'I')\n self.param_list.append(s + 'D')\n self.param_list.append(s + 'Min')\n self.param_list.append(s + 'Max')\n self.param_list.append(s + 'proc')\n\n def stop(self):\n self.__stop.set()\n\n\n def run(self):\n print(\"Communication worker started\")\n\n mavin = mavutil.mavlink_connection(self.udpin)\n mavout = mavutil.mavlink_connection(self.udpout)\n recv = None\n\n # wait heartbeat\n m = mavin.recv_match(type=\"HEARTBEAT\", blocking=True, timeout=5)\n if m is not None:\n self.from_pnc.put_nowait(m)\n self.gui.event_generate('<>', when='tail')\n mavout.target_system = m.get_srcSystem()\n else:\n self.gui.update(\"Connection time is out\")\n return\n\n # acquire PIDs settings\n self._recv_param_all(mavin, mavout)\n\n # main working loop\n while not (self.__stop.is_set()):\n recv = None\n try:\n recv = self.to_pnc.get_nowait()\n except Empty:\n pass\n if recv is not None:\n pass\n time.sleep(0.02)\n\n # that's all\n print(\"Communication worker stopped\")\n\n\n def _recv_param_value(self, name, timeout, retry, mavin, mavout):\n while retry > 0:\n mavout.param_fetch_one(bytearray(name, \"ascii\"))\n print(\"Trying to get:\", name)\n m = mavin.recv_match(type=\"PARAM_VALUE\", blocking=True, timeout=timeout)\n if m is None:\n retry -= 1\n print (\"Time is out. 
Retrying:\", retry)\n continue\n st = m.param_id.decode(\"utf-8\")\n st = st[0:len(name)]\n if st == name:\n if m.param_type == 9 or m.param_type == 10:\n return float(m.param_value)\n else:\n b = struct.pack('>', when='tail')\n except:\n self.__stop.set()\n return\n\n\n def _acqure(self):\n return\n\n def _save(self, param):\n return\n\n def _write_rom(self, param):\n return\n\n","repo_name":"barthess/u2","sub_path":"tools/alcoi_pulse/commworker.py","file_name":"commworker.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27585037497","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef Check(auth, mirrors):\n response = requests.get(\"http://www.portaudio.com/download.html\")\n html = response.content.decode(\"utf-8\")\n parsedHtml = BeautifulSoup(html, \"html.parser\")\n\n links = parsedHtml.find_all(\"a\")\n\n for link in links:\n if link[\"href\"].find(\"archives/pa_stable_\") != -1:\n strongs = link.findChildren(\"strong\")\n if len(strongs) == 1:\n versionText = strongs[0].text.split(\"_\")[2]\n major = int(versionText[1:3])\n minor = int(versionText[3:5])\n patch = int(versionText[5:7])\n return \"%i.%i.%i\" % (major, minor, patch)\n","repo_name":"edomin/vgazer","sub_path":"vgazer/version/custom_checker/portaudio.py","file_name":"portaudio.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25469963207","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport pandas as pd\n\napp = dash.Dash(__name__)\n\n# Read the data \ndf = pd.read_csv('parliamentary-constituency-profiles-data.csv', usecols=[ \"Parliamentary Constituency\", 'Persons-2008'])\n\n#cleaning data\ndf = df.dropna()\ndf = df.reset_index(drop=True)\n#Removing the total\ndf = df[:-2]\n\n# Create the figure\nfig = go.Figure(data=go.Scatter(x=df[\"Parliamentary Constituency\"],\n y=df['Persons-2008'],\n mode='markers',\n marker_color=df['Persons-2008'],\n )) \n\nfig.update_layout(title='Population in Greater London')\n\napp.layout = html.Div(children=[\n html.H1(children='Visualization of Population in Greater London'),\n\n html.Div(children='''\n A scatter plot of population in Greater London\n '''),\n\n dcc.Graph(\n id='example-graph',\n figure= fig,\n style={'width': '200vh', 'height': '90vh'}\n \n )\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)","repo_name":"hzhasnal/Testing","sub_path":"plotly_dash_graph_2.py","file_name":"plotly_dash_graph_2.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20523771529","text":"import logging\nimport time\nimport tempfile\nimport gc\n\nimport numpy as np\n\nfrom .results import unique_filename\nfrom .config import get_config, set_mpl_rcparams\nfrom pymeasure.log import setup_logging, console_log\nfrom pymeasure.experiment import Results, Worker\n\nlog = logging.getLogger()\nlog.addHandler(logging.NullHandler())\n\ntry:\n from IPython import display\nexcept ImportError:\n log.warning(\"IPython could not be imported\")\n\n\ndef get_array(start, stop, step):\n \"\"\"Returns a numpy array from start to stop\"\"\"\n step = np.sign(stop - start) * abs(step)\n return np.arange(start, stop + step, step)\n\n\ndef get_array_steps(start, stop, 
numsteps):\n \"\"\"Returns a numpy array from start to stop in numsteps\"\"\"\n return get_array(start, stop, (abs(stop - start) / numsteps))\n\n\ndef get_array_zero(maxval, step):\n \"\"\"Returns a numpy array from 0 to maxval to -maxval to 0\"\"\"\n return np.concatenate((np.arange(0, maxval, step), np.arange(maxval, -maxval, -step),\n np.arange(-maxval, 0, step)))\n\n\ndef create_filename(title):\n \"\"\"\n Create a new filename according to the style defined in the config file.\n If no config is specified, create a temporary file.\n \"\"\"\n config = get_config()\n if 'Filename' in config._sections.keys():\n filename = unique_filename(suffix='_%s' % title, **config._sections['Filename'])\n else:\n filename = tempfile.mktemp()\n return filename\n\n\nclass Experiment:\n \"\"\" Class which starts logging and creates/runs the results and worker processes.\n\n .. code-block:: python\n\n procedure = Procedure()\n experiment = Experiment(title, procedure)\n experiment.start()\n experiment.plot_live('x', 'y', style='.-')\n\n for a multi-subplot graph:\n\n import pylab as pl\n ax1 = pl.subplot(121)\n experiment.plot('x','y',ax=ax1)\n ax2 = pl.subplot(122)\n experiment.plot('x','z',ax=ax2)\n experiment.plot_live()\n\n :var value: The value of the parameter\n\n :param title: The experiment title\n :param procedure: The procedure object\n :param analyse: Post-analysis function, which takes a pandas dataframe as input and\n returns it with added (analysed) columns. The analysed results are accessible via\n experiment.data, as opposed to experiment.results.data for the 'raw' data.\n :param _data_timeout: Time limit for how long live plotting should wait for datapoints.\n \"\"\"\n\n def __init__(self, title, procedure, analyse=(lambda x: x)):\n self.title = title\n self.procedure = procedure\n self.measlist = []\n self.port = 5888\n self.plots = []\n self.figs = []\n self._data = []\n self.analyse = analyse\n self._data_timeout = 10\n\n config = get_config()\n set_mpl_rcparams(config)\n if 'Logging' in config._sections.keys():\n self.scribe = setup_logging(log, **config._sections['Logging'])\n else:\n self.scribe = console_log(log)\n self.scribe.start()\n\n self.filename = create_filename(self.title)\n log.info(\"Using data file: %s\" % self.filename)\n\n self.results = Results(self.procedure, self.filename)\n log.info(\"Set up Results\")\n\n self.worker = Worker(self.results, self.scribe.queue, logging.DEBUG)\n log.info(\"Create worker\")\n\n def start(self):\n \"\"\"Start the worker\"\"\"\n log.info(\"Starting worker...\")\n self.worker.start()\n\n @property\n def data(self):\n \"\"\"Data property which returns analysed data, if an analyse function\n is defined, otherwise returns the raw data.\"\"\"\n self._data = self.analyse(self.results.data.copy())\n return self._data\n\n def wait_for_data(self):\n \"\"\"Wait for the data attribute to fill with datapoints.\"\"\"\n t = time.time()\n while self.data.empty:\n time.sleep(.1)\n if (time.time() - t) > self._data_timeout:\n log.warning('Timeout, no data received for liveplot')\n return False\n return True\n\n def plot_live(self, *args, **kwargs):\n \"\"\"Live plotting loop for jupyter notebook, which automatically updates\n (an) in-line matplotlib graph(s). 
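Typical usage, as in the class-level example:\n experiment.plot_live('x', 'y', style='.-'). 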
Will create a new plot as specified by input\n arguments, or will update (an) existing plot(s).\"\"\"\n if self.wait_for_data():\n if not (self.plots):\n self.plot(*args, **kwargs)\n while not self.worker.should_stop():\n self.update_plot()\n display.clear_output(wait=True)\n if self.worker.is_alive():\n self.worker.terminate()\n self.scribe.stop()\n\n def plot(self, *args, **kwargs):\n \"\"\"Plot the results from the experiment.data pandas dataframe. Store the\n plots in a plots list attribute.\"\"\"\n if self.wait_for_data():\n kwargs['title'] = self.title\n ax = self.data.plot(*args, **kwargs)\n self.plots.append({'type': 'plot', 'args': args, 'kwargs': kwargs, 'ax': ax})\n if ax.get_figure() not in self.figs:\n self.figs.append(ax.get_figure())\n self._user_interrupt = False\n\n def clear_plot(self):\n \"\"\"Clear the figures and plot lists.\"\"\"\n for fig in self.figs:\n fig.clf()\n for pl in self.plots:\n pl.close()\n self.figs = []\n self.plots = []\n gc.collect()\n\n def update_plot(self):\n \"\"\"Update the plots in the plots list with new data from the experiment.data\n pandas dataframe.\"\"\"\n try:\n self.data\n for plot in self.plots:\n ax = plot['ax']\n if plot['type'] == 'plot':\n x, y = plot['args'][0], plot['args'][1]\n if type(y) == str:\n y = [y]\n for yname, line in zip(y, ax.lines):\n self.update_line(ax, line, x, yname)\n\n display.clear_output(wait=True)\n display.display(*self.figs)\n time.sleep(0.1)\n except KeyboardInterrupt:\n display.clear_output(wait=True)\n display.display(*self.figs)\n self._user_interrupt = True\n\n def update_line(self, ax, hl, xname, yname):\n \"\"\"Update a line in a matplotlib graph with new data.\"\"\"\n del hl._xorig, hl._yorig\n hl.set_xdata(self._data[xname])\n hl.set_ydata(self._data[yname])\n ax.relim()\n ax.autoscale()\n gc.collect()\n\n def __del__(self):\n self.scribe.stop()\n if self.worker.is_alive():\n self.worker.recorder_queue.put(None)\n self.worker.monitor_queue.put(None)\n self.worker.stop()\n","repo_name":"pymeasure/pymeasure","sub_path":"pymeasure/experiment/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","stars":514,"dataset":"github-code","pt":"81"} +{"seq_id":"10006199706","text":"### File docstring and test function\n\nclass NoBadWordsLogitsProcessor(SequenceBiasLogitsProcessor):\n \"\"\"\n [`LogitsProcessor`] that enforces that specified sequences will never be selected.\n\n \n\n In order to get the token ids of the words that should not appear in the generated text, make sure to set\n `add_prefix_space=True` when initializing the tokenizer, and use `tokenizer(bad_words,\n add_special_tokens=False).input_ids`. The `add_prefix_space` argument is only supported for some slow tokenizers,\n as fast tokenizers' prefixing behaviours come from `pre tokenizers`. Read more\n [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers).\n\n \n\n Args:\n bad_words_ids (`List[List[int]]`):\n List of list of token ids that are not allowed to be generated.\n eos_token_id (`Union[int, List[int]]`):\n The id of the *end-of-sequence* token. 
Optionally, use a list to set multiple *end-of-sequence* tokens\n that come from the instantiated generation configuration file.\n\n Examples:\n\n ```python\n >>> from transformers import AutoTokenizer, AutoModelForCausalLM\n\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n >>> inputs = tokenizer([\"In a word, the cake is a\"], return_tensors=\"pt\")\n\n >>> summary_ids = model.generate(inputs[\"input_ids\"], max_new_tokens=5, pad_token_id=tokenizer.eos_token_id)\n >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])\n In a word, the cake is a bit of a mess.\n\n >>> # Now let's control generation by taking the bad words out. Please note that the tokenizer is initialized differently.\n >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained(\"gpt2\", add_prefix_space=True)\n\n >>> def get_tokens_as_list(word_list):\n ... \"Converts a sequence of words into a list of tokens\"\n ... tokens_list = []\n ... for word in word_list.split(\" \"):\n ... tokenized_word = tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0]\n ... tokens_list.append(tokenized_word)\n ... return tokens_list\n\n >>> word_list = \"mess\"\n >>> bad_words_ids = get_tokens_as_list(word_list=word_list)\n\n >>> badwords_ids = model.generate(inputs[\"input_ids\"], max_new_tokens=5, bad_words_ids=bad_words_ids, eos_token_id=tokenizer_with_prefix_space.eos_token_id)\n >>> print(tokenizer.batch_decode(badwords_ids, skip_special_tokens=True)[0])\n In a word, the cake is a bit of a surprise.\n\n >>> badwords_ids = model.generate(inputs[\"input_ids\"], max_new_tokens=4, num_beams=5, bad_words_ids=bad_words_ids)\n >>> print(tokenizer.batch_decode(badwords_ids, skip_special_tokens=True)[0])\n In a word, the cake is a great way to start\n\n >>> # Now let's try with a sequence of words.\n >>> from transformers import AutoTokenizer, AutoModelForCausalLM\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n >>> inputs = tokenizer([\"the cake is a\"], return_tensors=\"pt\")\n >>> summary_ids = model.generate(inputs[\"input_ids\"], max_new_tokens=12, pad_token_id=tokenizer.eos_token_id)\n >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])\n the cake is a bit of a mess, but it's not a problem.\n\n >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained(\"gpt2\", add_prefix_space=True)\n\n >>> word_list = \"but it's not a problem.\"\n >>> bad_words_ids = get_tokens_as_list(word_list=word_list)\n\n >>> badwords_ids = model.generate(inputs[\"input_ids\"], max_new_tokens=12, bad_words_ids=bad_words_ids)\n >>> print(tokenizer.batch_decode(badwords_ids, skip_special_tokens=True)[0])\n the cake is a bit of a mystery, but I think it's a good idea\n ```\n \"\"\"\n\n def __init__(self, bad_words_ids: List[List[int]], eos_token_id: Union[int, List[int]]):\n self.bad_word_ids = bad_words_ids\n self._validate_arguments()\n\n # Filter EOS token from bad_words_ids\n if eos_token_id is None:\n eos_token_id = []\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n bad_words_ids = list(\n filter(lambda bad_token_seq: all(bad_token_seq != [i] for i in eos_token_id), bad_words_ids)\n )\n\n # Forbidding a sequence is equivalent to setting its bias to -inf\n sequence_bias = {tuple(sequence): float(\"-inf\") for sequence in bad_words_ids}\n super().__init__(sequence_bias=sequence_bias)\n\n def _validate_arguments(self):\n bad_words_ids = 
self.bad_word_ids\n if not isinstance(bad_words_ids, list) or len(bad_words_ids) == 0:\n raise ValueError(f\"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.\")\n if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):\n raise ValueError(f\"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.\")\n if any(\n any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)\n for bad_word_ids in bad_words_ids\n ):\n raise ValueError(\n f\"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}.\"\n )\n","repo_name":"SoyGema/contrib_schema","sub_path":"docstring.py","file_name":"docstring.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39978464677","text":"import os\nimport tempfile\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\nfrom typing import Optional\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom torch import nn\n\nfrom mmcompression.core.evaluation.metrics import psnr\n\n\nclass BaseCompressor(nn.Module):\n \"\"\"Base class for compressors.\"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super().__init__()\n self.cache_files = []\n\n def _get_tmp_file(self, suffix: str = None):\n \"\"\"\n Get a temporary file.\n Args:\n suffix (str): The suffix of the temporary file.\n Returns:\n str: The path of the temporary file.\n \"\"\"\n tmp_file = os.path.join(\n tempfile.gettempdir(), next(tempfile._get_candidate_names())\n )\n tmp_file = tmp_file + f\"_{os.getpid()}\" + f\".{suffix}\"\n self.cache_files.append(tmp_file)\n return tmp_file\n\n def __del__(self):\n \"\"\"\n Delete the temporary files.\n \"\"\"\n for p in self.cache_files:\n try:\n os.remove(p)\n except OSError:\n pass\n self.cache_files.clear()\n\n @staticmethod\n def calculate_bits_num(bitstreams: dict):\n \"\"\"Calculate the bits of the compressed image.\n Args:\n bitstreams (dict): The bitstreams of the compressed image.\n Returns:\n bits_num (int): The bits number of the compressed image.\n \"\"\"\n bits_num = 0\n for k, v in bitstreams.items():\n bits_num += len(v) * 8\n return bits_num\n\n @staticmethod\n def calculate_psnr(img1: torch.Tensor, img2: torch.Tensor):\n \"\"\"Calculate the PSNR between two images.\n Args:\n img1 (Tensor): The first image within shape of (C, H, W).\n img2 (Tensor): The second image within shape of (C, H, W).\n Typically, these should be scaled into zero to one.\n Returns:\n float: The PSNR between two images.\n \"\"\"\n img1 = img1.detach().cpu().numpy() * 255.\n img2 = img2.detach().cpu().numpy() * 255.\n return psnr(img1, img2, input_order='CHW')\n\n def save_img(self, img: torch.Tensor):\n \"\"\"Save the image to a temporary file.\n Args:\n img (Tensor): The image to be saved within shape of (C, H, W).\n Typically, these should be scaled into zero to one.\n Returns:\n str: The path of the temporary file.\n \"\"\"\n tmp_file = self._get_tmp_file(suffix='png')\n img = img.detach().cpu().numpy() * 255.\n img = np.transpose(img, (1, 2, 0)).astype(np.uint8)\n # rgb to bgr\n img = img[..., ::-1]\n mmcv.imwrite(tmp_file, img)\n return tmp_file\n\n @abstractmethod\n def compress(self, img: Optional):\n \"\"\"\n Args:\n img (Optional): np.ndarray or Tensor. 
The original images of shape (C, H, W).\n Typically, these should be scaled into zero to one within RGB format.\n Returns:\n bitstreams (dict): The compressed bitstreams.\n \"\"\"\n\n @abstractmethod\n def decompress(self, bitstreams: dict):\n \"\"\"\n Args:\n bitstreams (dict): The compressed bitstreams.\n Returns:\n img (Optional): np.ndarray or Tensor. The recovered image of shape (C, H, W).\n \"\"\"\n\n @abstractmethod\n def forward_train(self, img: torch.FloatTensor, img_metas: list, **kwargs):\n \"\"\"\n Args:\n img (Tensor): The original images of shape (N, C, H, W).\n Typically, these should be scaled into zero to one within RGB format.\n img_metas (list[dict]): List of image info dict where each dict\n has: 'filename' and may also contain other keys.\n For details on the values of these keys, see\n :class:`mmcompress.datasets.pipelines.Collect`.\n kwargs (keyword arguments): Specific to concrete implementation.\n \"\"\"\n\n @abstractmethod\n def forward_test(self, img: torch.FloatTensor, img_metas: list, return_image: bool = False, **kwargs):\n \"\"\"\n Args:\n img (Tensor): The original images of shape (N, C, H, W).\n Typically, these should be scaled into zero to one within RGB format.\n img_metas (list[dict]): List of image info dict where each dict\n has: 'filename' and may also contain other keys.\n For details on the values of these keys, see\n :class:`mmcompress.datasets.pipelines.Collect`.\n return_image (bool): whether to return the image after compress-decompress.\n kwargs (keyword arguments): Specific to concrete implementation.\n \"\"\"\n\n def forward(self, img: torch.FloatTensor, img_metas: list, return_loss=True, return_image=False, **kwargs):\n \"\"\"\n Args:\n img (Tensor): The original images of shape (N, C, H, W).\n Typically, these should be scaled into zero to one.\n img_metas (list[dict]): List of image info dict where each dict\n has: 'filename' and may also contain other keys.\n For details on the values of these keys, see\n :class:`mmcompress.datasets.pipelines.Collect`.\n return_loss (bool): whether to return the loss.\n return_image (bool): whether to return the image after compress-decompress.\n kwargs (keyword arguments): Specific to concrete implementation.\n \"\"\"\n if return_loss:\n return self.forward_train(img, img_metas, **kwargs)\n else:\n return self.forward_test(img, img_metas, return_image, **kwargs)\n\n def train_step(self, data_batch: dict, optimizer: torch.optim.Optimizer):\n \"\"\"The iteration step during training.\n This method defines an iteration step during training, except for the\n back propagation and optimizer updating, which are done in an optimizer\n hook. Note that in some complicated cases or models, the whole process\n including back propagation and optimizer updating is also defined in\n this method, such as GAN.\n\n Args:\n data_batch (dict): The output of dataloader.\n optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of\n runner is passed to ``train_step()``. 
This argument is unused\n and reserved.\n\n Returns:\n dict: It should contain at least 3 keys: ``loss``, ``log_vars``,\n ``num_samples``.\n ``loss`` is a tensor for back propagation, which can be a\n weighted sum of multiple losses.\n ``log_vars`` contains all the variables to be sent to the\n logger.\n ``num_samples`` indicates the batch size (when the model is\n DDP, it means the batch size on each GPU), which is used for\n averaging the logs.\n \"\"\"\n losses = self(**data_batch)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n num_samples=len(data_batch['img_metas']))\n\n return outputs\n\n def val_step(self, data_batch: dict, **kwargs):\n \"\"\"The iteration step during validation.\n This method shares the same signature as :func:`train_step`, but used\n during val epochs. Note that the evaluation after training epochs is\n not implemented with this method, but an evaluation hook.\n \"\"\"\n output = self(**data_batch)\n return output\n\n @staticmethod\n def _parse_losses(losses):\n \"\"\"Parse the raw outputs (losses) of the network.\n Args:\n losses (dict): Raw output of the network, which usually contain\n losses and other necessary information.\n Returns:\n tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor\n which may be a weighted sum of all losses, log_vars contains\n all the variables to be sent to the logger.\n \"\"\"\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n else:\n raise TypeError(\n f'{loss_name} is not a tensor or list of tensors')\n\n loss = sum(_value for _key, _value in log_vars.items()\n if 'loss' in _key)\n\n log_vars['loss'] = loss\n for loss_name, loss_value in log_vars.items():\n # reduce loss when distributed training\n if dist.is_available() and dist.is_initialized():\n loss_value = loss_value.data.clone()\n dist.all_reduce(loss_value.div_(dist.get_world_size()))\n log_vars[loss_name] = loss_value.item()\n\n return loss, log_vars\n\n def show_result(self,\n img,\n rec_img,\n win_name='',\n show=False,\n wait_time=0,\n out_file=None):\n \"\"\"Draw `result` over `img`.\n Args:\n img (str or Tensor): The original image.\n rec_img (str or Tensor): The recovered image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n Default: 0.\n show (bool): Whether to show the image.\n Default: False.\n out_file (str or None): The filename to write the image.\n Default: None.\n Returns:\n img (Tensor): Only if not `show` or `out_file`\n \"\"\"\n img = mmcv.imread(img)\n img = img.copy()\n rec_img = mmcv.imread(rec_img)\n rec_img = rec_img.copy()\n img = np.vstack([img, rec_img])\n\n # if out_file specified, do not show image in window\n if out_file is not None:\n show = False\n\n if show:\n mmcv.imshow(img, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(img, out_file)\n\n if not (show or out_file):\n warnings.warn('show==False and out_file is not specified, only '\n 'result image will be returned')\n return img\n","repo_name":"lizhihao6/MMCompression_open","sub_path":"mmcompression/models/compressors/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"1280676297","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls 
import patterns, url\n\nfrom tech_support.ticket.views import (\n\tTicketListView, TicketDetailView, create_ticket,\n\tchange_ticket, statistic)\n\nurlpatterns = patterns('',\n\turl(r'^$', TicketListView.as_view(), name='ticket'),\n\turl(r'^(\\d+)/$', TicketDetailView, name='ticket_id'),\n\turl(r'^create_ticket/$', create_ticket),\n\turl(r'^change_ticket/(\\d+)/$', change_ticket),\n\turl(r'^statistic/$', statistic),\n)","repo_name":"dimanf/support","sub_path":"tech_support/ticket/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"12135934488","text":"# Authors: Koye Sodipo \n# License: MIT\n\n\nimport csv\nimport gc\n\nimport numpy\nimport pandas\nfrom sklearn import preprocessing\nfrom random import randint\nfrom scipy import stats\nfrom dateutil.parser import parse\n\n\n# Two fundamental problems with determining whether 1st row is header:\n# 1.) If all elements (including the header) are numbers, code will think header is just any other data point\n# 2.) If all elements (including the header) are strings, code will think\n# header is just any other data point. This can be solved if we assume the\n# header title is unique to the entire column (i.e. occurs only once as\n# header).\n\n\nclass datawiz:\n def __init__(\n self,\n train_path=None,\n test_path=None,\n use=0,\n target_col=-99,\n exclude_cols=None,\n missing_values='fill',\n dt_convert=1,\n pds_chunksize=0,\n advanced_ops=True,\n drop_cols=False):\n\n # Default settings\n file_path = train_path\n test_file_path = test_path\n to_use = use\n use_numpy = True\n use_pandas = False\n use_list = False\n target_column = target_col\n # Avoid a mutable default argument: fall back to a fresh list here\n exclude_columns = [] if exclude_cols is None else exclude_cols\n test_split = 0.2\n missing_vals = missing_values\n pd_chunksize = pds_chunksize\n dt_convert = dt_convert\n\n # Removes white space in string columns, datetime conversion\n advanced_ops = advanced_ops\n # Specifies whether recommended columns to be dropped are actually\n # dropped automatically\n drop_cols = drop_cols\n\n # Advanced Default settings (not editable through arguments)\n\n # should the date parser consider the first number group ('09') in '09/12/2010' as the day?\n dayfirst = True\n\n array = []\n array_test = []\n ans = -1\n accum = []\n header_or_not = []\n col_is_categorical = []\n col_is_datetime = []\n encoders = []\n header = []\n\n dt_array = []\n dt_array_test = []\n\n while True:\n try:\n if file_path is None:\n file_path = input(\n 'Enter train file path (surround with quotes) :')\n if to_use is None:\n to_use = input(\n 'Enter 0 for numpy, 1 for pandas and 2 for list: ')\n if target_column == -99:\n target_column = input(\n 'Enter index of the target column. Enter \"None\" if no target: ')\n if exclude_columns == []:\n while True:\n excl = input(\n 'List the index of columns to exclude. 
Enter \"None\" to quit...')\n if excl is None:\n break\n exclude_columns.append(excl)\n\n ans = 1\n except:\n raise NameError('Please enter valid answers')\n\n if ans == 1 or ans == 0:\n break\n\n use_numpy = True if (\n to_use == 0 or to_use == 'numpy') else False\n use_pandas = True if (\n to_use == 1 or to_use == 'pandas') else False\n use_list = True if (\n to_use == 2 or to_use == 'list') else False\n\n if use_numpy:\n csv_iter = csv.reader(open(file_path, 'rb'))\n data = [row for row in csv_iter]\n array = numpy.array(data)\n del data\n gc.collect()\n\n elif use_pandas:\n if pd_chunksize > 0:\n array = None\n for i, chunk in enumerate(pandas.read_csv(\n file_path, chunksize=pd_chunksize, low_memory=False)):\n if array is None:\n array = chunk.copy() # not simply a reference to it\n else:\n array = pandas.concat([array, chunk])\n del chunk\n gc.collect()\n\n else:\n array = pandas.read_csv(file_path)\n\n elif use_list:\n csv_iter = csv.reader(open(file_path, 'rb'))\n array = [row for row in csv_iter]\n \n\n\ndef read_test(self):\n \n while True:\n ans_t = 1\n try:\n if test_file_path is None:\n test_file_path = input(\n 'Enter file path (surround with quotes) :')\n break\n except:\n raise NameError\n\n # User can reset these by manipulating the class objects directly\n # before calling read_test()\n if use_numpy:\n csv_iter = csv.reader(open(test_file_path, 'rb'))\n data = [row for row in csv_iter]\n array_test = numpy.array(data)\n del data\n gc.collect()\n\n elif use_pandas:\n if pd_chunksize > 0:\n array_test = None\n for i, chunk in enumerate(\n pandas.read_csv(\n array_testtest_file_path, chunksize=pd_chunksize)):\n if array_test is None:\n array_test = chunk.copy() # not simply a reference to it\n else:\n array_test = pandas.concat(\n [array_test, chunk])\n del chunk\n gc.collect()\n\n else:\n array_test = pandas.read_csv(test_file_path)\n\n elif use_list:\n csv_iter = csv.reader(open(test_file_path, 'rb'))\n array_test = [row for row in csv_iter]\n \n\n\ndef process_test(self):\n # array to be returned (as a reference to array_test)\n X_test = array_test\n encoders_local = encoders\n tc = target_column if target_column != - \\\n 1 else len(encoders) - 1\n # Now encoders local should match test columns after we've popped the\n # encoder for the target column\n encoders_local.pop(tc)\n is_header = True if True in header_or_not else False\n adjusted_exclude_columns = []\n\n is_dt_local = col_is_datetime\n is_dt_local.pop(tc)\n\n for i in exclude_columns:\n if i < tc:\n adjusted_exclude_columns.append(i)\n if i > tc: # Because if the target column was in the middle of the train array, the values provided for\n # excl_cols greater than indexof target col\n # ... need to be reduced by 1 since test array would already lacks the target column\n adjusted_exclude_columns.append(i - 1)\n print(len(encoders_local), len(encoders))\n for x in adjusted_exclude_columns:\n encoders_local[x] = 'Dont need this'\n is_dt_local[x] = False\n\n if isinstance(array_test, numpy.ndarray):\n if is_header:\n X_test = array_test[1:]\n else:\n q = None # Completely useless atm. 
Feel we might need another case here in future\n\n try:\n rng = xrange(0, len(X_test[0, 0:]))\n except NameError:\n rng = range(0, len(X_test[0, 0:]))\n\n for column in rng:\n if column in adjusted_exclude_columns: # no point processing columns we will later exclude\n continue\n\n # If column is categorical but also a datetime, don't convert it\n if not isinstance(encoders_local[column], str) and is_dt_local[column] is False:\n # convert to number labels using LabelEncode\n # print(column)\n if advanced_ops: # remove leading or trailing spaces\n X_test[:, column] = numpy.char.strip(X_test[:, column])\n # output of encoder.transform is a numpy.ndarray, FYI\n X_test[:, column] = encoders_local[column].transform(X_test[:, column], True)\n if dt_convert == 1:\n if is_dt_local[column]:\n dt_array_test.append(numpy.array(\n [parse(i, dayfirst=dayfirst) for i in X_test[:, column]]))\n\n array_of_col_index = [n for n in range(0, len(X_test[0]))]\n # Pick only the columns not listed to be excluded\n X_test = X_test[:, [i for i in array_of_col_index if (\n i not in adjusted_exclude_columns)]]\n\n if isinstance(array_test, pandas.core.frame.DataFrame):\n if is_header:\n X_test = array_test[1:]\n else:\n q = None\n\n # Handle missing values\n if missing_vals == 'fill' or missing_vals == 'drop':\n # Missing values shouldn't be dropped in the test set\n for index, column in enumerate(array_test.columns):\n if col_is_categorical[index]:\n mode = stats.mode(X_test.loc[:][column])[0][0]\n X_test[column] = X_test[column].fillna(mode)\n else:\n mean = numpy.mean(\n X_test[column][\n pandas.notnull(\n X_test[column])])\n X_test[column] = X_test[column].fillna(mean)\n\n for index, column in enumerate(X_test.columns):\n if index in adjusted_exclude_columns: # no point processing columns we will later exclude\n continue\n\n if not isinstance(\n encoders_local[index],\n str) and is_dt_local[index] is False:\n if advanced_ops:\n X_test[column] = X_test[column].str.strip()\n # this back references and actually modifies the ooriginal\n # test.csv in memory\n X_test.loc[:][column] = encoders_local[\n index].transform(X_test[column], True)\n # Attach a datetime object for each column.\n if dt_convert == 1:\n if col_is_datetime[index]:\n dt_array_test.append(pandas.Series(\n [parse(i, dayfirst=dayfirst) for i in X_test[column]]))\n\n for i in adjusted_exclude_columns:\n no_use = X_test.pop(i)\n\n print('len of enc local and dt_local ', len(encoders_local), len(is_dt_local))\n return X_test\n\n\"\"\"def drop(self,cols): #arg \"cols\" can be a single index or array in indexes\n if not hasattr(self, \"is_processed\"):\n raise ValueError(\"datawiz array must be processed before dropping columns.\")\n drop_indexes = []\n if type(cols)== int:\n drop_indexes.append(cols)\n else:\n drop_indexes = cols \"\"\"\n\n\n\n","repo_name":"ksodipo/DataWiz","sub_path":"datawiz/datawiz.py","file_name":"datawiz.py","file_ext":"py","file_size_in_byte":10415,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"14255353207","text":"from typing import List\n\nfrom fastapi import APIRouter, Response\n\nfrom bot.keyboards import pin_inline\nfrom config import TELEGRAM_USER_ID, bot\nfrom db.schemas import SplitwiseItem\n\nrouter = APIRouter(\n prefix='/splitwise',\n)\n\n\n@router.post('')\nasync def process_transaction(items: List[SplitwiseItem]):\n for item in items:\n if not item.payment:\n await bot.send_message(\n chat_id=TELEGRAM_USER_ID,\n text=item.message_view,\n 
disable_notification=True,\n reply_markup=pin_inline,\n )\n\n return Response()\n\n\n@router.get('')\nasync def ping():\n return Response()\n","repo_name":"vladtsap/splitwise-messages","sub_path":"web/splitwise.py","file_name":"splitwise.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30547623344","text":"\n\nfrom numpy import *\n\ndef linreg(inputs,targets):\n #print(-ones((shape(inputs)[0],1)))\n inputs = concatenate((inputs,-ones((shape(inputs)[0],1))),axis=1) # add input for bias node\n print(inputs)\n beta = dot(dot(linalg.inv(dot(transpose(inputs),inputs)),transpose(inputs)),targets)\n\n outputs = dot(inputs,beta)\n #print shape(beta)\n #print outputs\n return beta\n","repo_name":"AjayKrP/Machine-Learning","sub_path":"CH_2/linreg.py","file_name":"linreg.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25845148359","text":"import sys\nimport numpy as np\nimport torch\n\n# torch.nn.modules useful for defining a MLIAPPY model.\nfrom lammps.mliap.pytorch import TorchWrapper, IgnoreElems\n\n# Read coefficients\ncoeffs = np.genfromtxt(\"Ta06A.mliap.model\",skip_header=6)\n\n# Write coefficients to a pytorch linear model\nbias = coeffs[0]\nweights = coeffs[1:]\nlin = torch.nn.Linear(weights.shape[0],1)\nlin.to(torch.float64)\nwith torch.autograd.no_grad():\n lin.weight.set_(torch.from_numpy(weights).unsqueeze(0))\n lin.bias.set_(torch.as_tensor(bias,dtype=torch.float64).unsqueeze(0))\n\n# Wrap the pytorch model for usage with mliappy coupling.\nmodel = IgnoreElems(lin) # The linear module does not use the types.\nn_descriptors = lin.weight.shape[1]\nn_elements = 1\nlinked_model = TorchWrapper(model,n_descriptors=n_descriptors,n_elements=n_elements)\n\ntorch.save(linked_model,\"Ta06A.mliap.pytorch.model.pt\")\n","repo_name":"lammps/lammps","sub_path":"examples/mliap/convert_mliap_Ta06A.py","file_name":"convert_mliap_Ta06A.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1860,"dataset":"github-code","pt":"81"} +{"seq_id":"22092014631","text":"from knn import KNN\nimport numpy as np\nimport datasets\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\n# load data\nx_train, y_train, x_test, y_test = datasets.gaussian_dataset(n_train=500, n_test=500)\naccuracy = list()\nfor i in range(1, 50, 5):\n model = KNN(k=i+1)\n model.fit(x_train, y_train)\n y_pred = model.predict(x_test)\n accuracy.append(np.mean(y_pred == y_test))\n\nax = plt.figure().gca()\nplt.plot(range(1, 50, 5), np.asarray(accuracy))\nax.xaxis.set_major_locator(MaxNLocator(integer=True))\nplt.xlabel('Value of k --->')\nplt.ylabel('Accuracy --->')\nplt.title('KNN Accuracy v/s iteration curve')\nplt.legend()\nplt.show()\n\n# model = KNN(k=3)\n# model.fit(x_train, y_train)\n#\n# y_pred = model.predict(x_test)\n# print(\"knn accuracy: \" + str(np.mean(y_pred == y_test)))\n","repo_name":"kaushik333/MLAlgorithms","sub_path":"ML Algorithms/run_knn.py","file_name":"run_knn.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5077691828","text":"import json\nfrom unittest import mock\n\nfrom absl.testing import absltest\nimport apiclient\n\n\nfrom multitest_transport.plugins import base\nfrom multitest_transport.plugins import google_drive\nfrom 
multitest_transport.util import errors\nfrom multitest_transport.util import file_util\n\n\nclass GoogleDriveTest(absltest.TestCase):\n\n def testFindBuildItemPath(self):\n provider = google_drive.GoogleDriveBuildProvider()\n self.assertEqual(\n None, provider.FindBuildItemPath('http://foo.com/bar/zzz'))\n self.assertEqual(\n '_id/1MpKCcIbPWmqwljk26onhQgENlL8lSdjg',\n provider.FindBuildItemPath(\n 'https://drive.google.com/file/d/1MpKCcIbPWmqwljk26onhQgENlL8lSdjg/view'\n ))\n self.assertEqual(\n '_id/1IrGl5_Zoj1mGMIuhzHAJB-lDpFfBWq3H',\n provider.FindBuildItemPath(\n 'https://drive.google.com/open?id=1IrGl5_Zoj1mGMIuhzHAJB-lDpFfBWq3H'\n ))\n\n def testGetFileIds(self):\n \"\"\"Test GetFileIds.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n response = {\n u'kind':\n u'drive#fileList',\n u'incompleteSearch':\n False,\n u'files': [{\n u'mimeType': u'application/vnd.google-apps.document',\n u'name': u'Midterm Eval',\n u'kind': u'drive#file',\n u'id': u'file_id1'\n }, {\n u'mimeType': u'image/jpeg',\n u'name': u'cat.jpg',\n u'kind': u'drive#file',\n u'id': u'file_id2'\n }, {\n u'mimeType': u'application/vnd.google-apps.folder',\n u'name': u'random',\n u'kind': u'drive#file',\n u'id': u'file_id3'\n }]\n }\n mock_api_client = mock.MagicMock()\n mock_api_client.files().list().execute.return_value = response\n provider._client = mock_api_client\n\n child_ids, page_token = provider._GetFileIds(param={})\n self.assertIsNotNone(child_ids)\n self.assertIsNone(page_token)\n self.assertEqual(3, len(child_ids))\n self.assertEqual(child_ids, ['file_id1', 'file_id2', 'file_id3'])\n\n def testGetFileIds_withEmptyResponse(self):\n \"\"\"Test GetFileIds with empty response.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n\n mock_api_client = mock.MagicMock()\n mock_api_client.files().list().execute.return_value = {}\n provider._client = mock_api_client\n\n child_ids, page_token = provider._GetFileIds(param={})\n self.assertIsNotNone(child_ids)\n self.assertIsNone(page_token)\n self.assertEqual(0, len(child_ids))\n self.assertEqual(child_ids, [])\n\n def testGetFileIds_withInvalidFileId(self):\n \"\"\"Test GetFileIds with invalid field id.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n\n mock_api_client = mock.MagicMock()\n side_effect = apiclient.http.HttpError(mock.Mock(status=404), 'not found')\n mock_api_client.files().list().execute.side_effect = side_effect\n provider._client = mock_api_client\n with self.assertRaises(errors.PluginError) as e:\n param = {}\n param['q'] = (\n google_drive._QUERY_ITEM_FORMAT % ('parent_id', 'child_name'))\n provider._GetFileIds(param=param)\n self.assertEqual(e.exception.message,\n google_drive._PARAM_NOT_VALID_ERROR % json.dumps(param))\n\n def testGetFileItem_withInvalidFileId(self):\n \"\"\"Test GetFileItem with invalid file id.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n\n mock_api_client = mock.MagicMock()\n side_effect = apiclient.http.HttpError(mock.Mock(status=404), 'not found')\n mock_api_client.files().get().execute.side_effect = side_effect\n provider._client = mock_api_client\n\n with self.assertRaises(errors.FileNotFoundError) as e:\n provider._GetFileItem(file_id='invalid_file_id')\n self.assertEqual(e.exception.message,\n google_drive._INVALID_FILE_ID_ERROR % 'invalid_file_id')\n\n def testGetFileItem(self):\n \"\"\"Test GetFileItem.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n response = {\n u'mimeType': u'application/vnd.google-apps.folder',\n u'name': u'sample_folder',\n 
u'modifiedTime': u'2018-06-25T18:24:52.049Z'\n }\n\n mock_api_client = mock.MagicMock()\n mock_api_client.files().get().execute.return_value = response\n provider._client = mock_api_client\n\n file_item = provider._GetFileItem(file_id='sample_folder_id')\n self.assertIsNotNone(file_item)\n self.assertEqual(file_item['mimeType'], google_drive._FOLDER_TYPE)\n self.assertEqual(file_item['name'], 'sample_folder')\n\n def testConvertFileToBuildItem_withFolderObject(self):\n \"\"\"test _ConvertFileToBuildItem with a folder object.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n\n file_item = {\n u'mimeType': u'application/vnd.google-apps.folder',\n u'name': u'sample_folder',\n u'modifiedTime': u'2018-06-25T18:24:52.049Z'\n }\n path = 'folderA/sample_folder/'\n build_item = provider._ConvertFileToBuildItem(file_item, path)\n self.assertIsNotNone(build_item)\n self.assertEqual(build_item.name, 'sample_folder/')\n self.assertEqual(build_item.path, path)\n self.assertFalse(build_item.is_file)\n self.assertEqual(build_item.size, 0)\n self.assertIsNone(build_item.timestamp)\n\n def testConvertFileToBuildItem_withFileObject(self):\n \"\"\"Test _ConvertFileToBuildItem with a file object.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n\n file_item = {\n u'mimeType': u'application/vnd.google-apps.document',\n u'name': u'sample_file.txt',\n u'modifiedTime': u'2018-07-23T05:24:23.624Z'\n }\n path = 'folderA/sample_folder/sample_file.txt'\n build_item = provider._ConvertFileToBuildItem(\n file_item, path)\n self.assertIsNotNone(build_item)\n self.assertEqual(build_item.name, 'sample_file.txt')\n self.assertEqual(build_item.path, path)\n self.assertTrue(build_item.is_file)\n self.assertEqual(build_item.size, 0)\n self.assertIsNotNone(build_item.timestamp)\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileIds')\n def testGetFileIdHelper_withDuplicatedObjectNames(\n self, mock_get_file_ids):\n \"\"\"Test _GetFileIdHelper with duplicated object names.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n mock_get_file_ids.return_value = (['id_1', 'id_2'], None)\n\n with self.assertRaises(errors.PluginError) as e:\n provider._GetFileIdHelper(parent_folder_id='parent', name='folderA')\n\n self.assertEqual(e.exception.message,\n google_drive._DUPLICATED_OBJECT_NAME_ERROR % 'folderA')\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileIds')\n def testGetFileIdHelper_withEmptyResults(\n self, mock_get_file_ids):\n \"\"\"Test _GetFileIdHelper with empty reults.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n\n mock_get_file_ids.return_value = ([], None)\n\n with self.assertRaises(errors.FileNotFoundError) as e:\n provider._GetFileIdHelper(parent_folder_id='parent', name='folderA')\n\n self.assertEqual(e.exception.message,\n google_drive._FILE_NOT_FOUND_ERROR % 'folderA')\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileIds')\n def testGetFileIdHelper(\n self, mock_get_file_ids):\n \"\"\"Test _GetFileIdHelper with empty reults.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n\n mock_get_file_ids.return_value = (['id_1'], None)\n object_id = provider._GetFileIdHelper(\n parent_folder_id='parent', name='folderA')\n\n self.assertIsNotNone(object_id)\n self.assertEqual(object_id, 'id_1')\n\n def testGetFileId_withEmptyPath(self):\n \"\"\"Test _GetFileId from path with empty path.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n folder_id = provider._GetFileId('')\n 
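# An empty path is expected to resolve to the Drive root folder.\n    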
self.assertEqual(folder_id, 'root')\n\n def testGetFileId_withFileIdPath(self):\n \"\"\"Test _GetFileId with file ID path.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n file_id = 'file_id'\n self.assertEqual(\n file_id,\n provider._GetFileId(google_drive._FILE_ID_PATH_PREFIX + file_id))\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileIdHelper')\n def testGetFolderIdFromPath_withValidPath(self, mock_get_object_id_helper):\n \"\"\"Test _GetFileId with valid path.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n mock_get_object_id_helper.return_value = 'folderA_valid_id'\n folder_id = provider._GetFileId('folderA/')\n self.assertEqual(folder_id, 'folderA_valid_id')\n\n mock_get_object_id_helper.return_value = 'folderC_valid_id'\n folder_id = provider._GetFileId('folder A/folder B/folder C')\n self.assertEqual(folder_id, 'folderC_valid_id')\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileItem')\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileId')\n def testGetBuildItem_withPathToFile(\n self, mock_get_object_id, mock_get_file_item):\n \"\"\"Test GetBuildItem with path to file.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n mock_get_object_id.return_value = 'valid_id'\n response = {\n u'mimeType': u'application/vnd.google-apps.folder',\n u'name': u'sample_folder',\n u'modifiedTime': u'2018-06-25T18:24:52.049Z'\n }\n mock_get_file_item.return_value = response\n build_item = provider.GetBuildItem('folderA/folderB/sample_folder')\n self.assertIsNotNone(build_item)\n self.assertEqual(build_item.name, 'sample_folder/')\n self.assertEqual(build_item.path, 'folderA/folderB/sample_folder')\n self.assertFalse(build_item.is_file)\n self.assertEqual(build_item.size, 0)\n self.assertIsNone(build_item.timestamp)\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileItem')\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileIds')\n @mock.patch.object(google_drive.GoogleDriveBuildProvider,\n '_GetFileId')\n def testListBuildItems(\n self, mock_get_object_id, mock_get_file_ids, mock_get_file_item):\n \"\"\"Test ListBuildItems.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n mock_get_object_id.return_value = 'valid_id'\n mock_get_file_ids.return_value = (['id_1', 'id_2'], None)\n response = {\n u'mimeType': u'application/vnd.google-apps.folder',\n u'name': u'sample_folder',\n u'modifiedTime': u'2018-06-25T18:24:52.049Z'\n }\n mock_get_file_item.return_value = response\n build_items, next_page_token = provider.ListBuildItems(\n path='', page_token=None)\n self.assertIsNotNone(build_items)\n self.assertEqual(len(build_items), 2)\n self.assertIsNone(next_page_token)\n self.assertEqual(build_items[0].name, 'sample_folder/')\n self.assertEqual(build_items[0].path, 'sample_folder')\n self.assertFalse(build_items[0].is_file)\n self.assertEqual(build_items[0].size, 0)\n self.assertIsNone(build_items[0].timestamp)\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider, 'GetBuildItem')\n def testDownloadFile_withInvalidFile(self, mock_get_build_item):\n \"\"\"Try to download folder instead of file.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n path = 'fake/path/'\n fake_build_item = base.BuildItem(\n name='fake/path/',\n path='fake/path/',\n is_file=False,\n size=0,\n timestamp=None)\n mock_get_build_item.return_value = fake_build_item\n with self.assertRaises(errors.FileNotFoundError) as e:\n list(provider.DownloadFile(path))\n 
self.assertEqual(\n e.exception.message, google_drive._FILE_NOT_FOUND_ERROR % path)\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider, 'GetBuildItem')\n def testDownloadFile_withNoneValue(self, mock_get_build_item):\n \"\"\"Try to download a file that does not exist.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n path = 'fake/path/'\n mock_get_build_item.return_value = None\n with self.assertRaises(errors.FileNotFoundError) as e:\n list(provider.DownloadFile(path))\n self.assertEqual(\n e.exception.message, google_drive._FILE_NOT_FOUND_ERROR % path)\n\n @mock.patch.object(google_drive.GoogleDriveBuildProvider, '_GetFileId')\n @mock.patch.object(google_drive.GoogleDriveBuildProvider, 'GetBuildItem')\n @mock.patch.object(\n apiclient.http.MediaIoBaseDownload, 'next_chunk', autospec=True)\n def testDownloadFile(self, mock_next_chunk, mock_build_item, mock_file_id):\n \"\"\"Test downloading a file with multiple chunks.\"\"\"\n provider = google_drive.GoogleDriveBuildProvider()\n provider._client = mock.MagicMock()\n path = 'fake/path/kitten.png'\n mock_build_item.return_value = base.BuildItem(\n name='fake/path/kitten.png',\n path='fake/path/kitten.png',\n is_file=True,\n size=0,\n timestamp=None)\n mock_file_id.return_value = 'file_id'\n\n # Mock downloader that processes two chunks of data\n mock_progress = iter([\n (b'hello', mock.MagicMock(resumable_progress=5, total_size=10), False),\n (b'world', mock.MagicMock(resumable_progress=10, total_size=10), True),\n ])\n def _next_chunk(self, **_):\n value, status, done = next(mock_progress)\n self._fd.write(value)\n return status, done\n mock_next_chunk.side_effect = _next_chunk\n\n result = list(provider.DownloadFile(path))\n mock_file_id.assert_called_with('fake/path/kitten.png')\n provider._client.files().get_media.assert_called_with(\n supportsAllDrives=True, fileId='file_id'\n )\n expected = [\n file_util.FileChunk(data=b'hello', offset=5, total_size=10),\n file_util.FileChunk(data=b'world', offset=10, total_size=10),\n ]\n self.assertEqual(expected, result)\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"maksonlee/multitest_transport","sub_path":"multitest_transport/plugins/google_drive_test.py","file_name":"google_drive_test.py","file_ext":"py","file_size_in_byte":13899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26340205028","text":"\"\"\"pwa URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom . 
import views\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.HomePage.as_view(), name='home'),\n    path('test/', views.TestPage.as_view(), name='test'),\n    path('thanks/', views.ThanksPage.as_view(), name='thanks'),\n    path('accounts/', include('apps.accounts.urls', namespace='accounts')),\n    path('accounts/', include('django.contrib.auth.urls')),\n    path('survey/', include('apps.survey.urls'), name='survey'),\n    path('records/', include('apps.records.urls'), name='records'),\n    path('shops/', include('apps.shops.urls'), name='shop'),\n    path('articles/', include('apps.articles.urls'), name='articles'),\n    path('result/', include('apps.results.urls'), name='result')\n]\n\n\nif settings.DEBUG:\n    import debug_toolbar\n    urlpatterns = [\n        path('__debug__/', include(debug_toolbar.urls))\n    ] + urlpatterns","repo_name":"CheolRyu/workspace","sub_path":"pwa/pwa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"5259038172","text":"import itertools\nimport operator\nfrom functools import reduce\nfrom typing import List\n\nwith open(f'{__file__.split(\".\")[0]}.txt') as f:\n    trees = [l.strip(\"\\n\") for l in f.readlines()]\n\n\ndef is_tree_visible_from_edge(forest: List[str], row: int, col: int) -> bool:\n    \"\"\"\n    For the given `forest`, checks whether the trees in between the tree at `row`/`col` and any of the edges\n    are all smaller than the tree itself (i.e. whether the tree is visible from any edge).\n    \"\"\"\n    directions = [\n        [forest[row][i] for i in range(col)],\n        [forest[row][i] for i in range(col + 1, len(forest[0]))],\n        [forest[i][col] for i in range(row)],\n        [forest[i][col] for i in range(row + 1, len(forest))]\n    ]\n    if any((len(neighbors) == 0) for neighbors in directions):\n        # Tree is already at an edge, if there are no trees in one of the directions, so it's definitely visible.\n        return True\n    return any((forest[row][col] > max(neighbors)) for neighbors in directions)\n\n\ndef view_distance(tree_heights: List, max_height: int) -> int:\n    \"\"\"\n    Iterates `tree_heights` and counts until the n-th element is hit, whose value is equal or greater than `max_height`.\n    :return: Amount of consecutive elements in `tree_heights` (starting at index 0), less than `max_height`.\n    The final element is included, as that last tree is still visible, despite being higher.\n    If no element of `tree_heights` is greater than or equal to `max_height`, the maximum of len(tree_heights) is\n    returned, as all trees can be seen then.\n    \"\"\"\n    if len(tree_heights) == 0:\n        return 0\n    for distance, height in enumerate(tree_heights, start=1):\n        if int(height) >= int(max_height):\n            return distance\n    # In case no tree is higher, the distance is counted until it hits the edge.\n    # In this case, this is the last known view_distance, from the last iteration.\n    return len(tree_heights)\n\n\ndef heights_from_viewpoint(forest: List[str], row: int, col: int) -> List[List[str]]:\n    \"\"\"\n    For the `forest`, lists the height for each tree between the given tree at `row`/`col`,\n    starting at that tree and going towards the edges. 
Each direction is listed separately as a list.\n :return: List of lists, each inner list representing the heights of trees visible in one of the directions.\n \"\"\"\n directions = [\n [forest[row][i] for i in range(col)][::-1], # Reverse to go from coordinates to edge\n [forest[row][i] for i in range(col + 1, len(forest[0]))],\n [forest[i][col] for i in range(row)][::-1], # Reverse to go from coordinates to edge\n [forest[i][col] for i in range(row + 1, len(forest))]\n ]\n return directions\n\n\ndef scenic_score(forest: List[str], row: int, col: int) -> int:\n \"\"\"\n Calculates the scenic score for a given tree at `row`/`col` in the forest, as given by the challenge description.\n \"\"\"\n tree_heights = heights_from_viewpoint(forest, row, col)\n return reduce(operator.mul,\n [view_distance(tree_row, int(forest[row][col])) for tree_row in tree_heights])\n\n\ncoordinates = itertools.product(range(len(trees)), range(len(trees[0])))\nprint(f\"Part 1: {sum(1 for row, col in coordinates if is_tree_visible_from_edge(trees, row, col))}\")\n\ncoordinates = itertools.product(range(len(trees)), range(len(trees[0])))\nprint(f\"Part 2: {max(scenic_score(trees, row, col) for row, col in coordinates)}\")\n","repo_name":"V0idC0de/AdventOfCode","sub_path":"day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44360725013","text":"import argparse\nfrom process_docs import ParsedDocument\nfrom pyterrier_framework import PythonTerrier\n# Expected usage:\n# ./run -q topics.xml -d documents.lst -r run -o sample.res ...\n\n# Where:\n# -q topics.xml -- a file including topics in the TREC format \n# -d documents.lst -- a file including document filenames \n# -r run -- a string identifying the experiment (will be inserted in the\n# result file as \"run_id\")\n# -o sample.res -- an output file \n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Command Line Arguments for Vector Space Model\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n \"-q\", help=\"a file including topics in the TREC format\")\n parser.add_argument(\"-d\", help=\"a file including document filenames\")\n parser.add_argument(\n \"-r\", help=\"a string identifying the experiment (will be inserted in the result file as \\\"run_id\\\"\")\n parser.add_argument(\"-o\", help=\"an output file\")\n\n args = parser.parse_args()\n config = vars(args)\n parsed_doc_df = ParsedDocument()\n parsed_doc_df.process_documents(config['q'], config['d'], config['r'])\n ir_framework = PythonTerrier(parsed_doc_df.parsed_docs, parsed_doc_df.lang, parsed_doc_df.run)\n ir_framework.index_docs()\n parsed_doc_df.create_queries()\n result = ir_framework.query_docs(parsed_doc_df.query_df)\n parsed_doc_df.get_output(result, config['o'], config['r'])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MichelleElizabethK/information-retrieval-with-pyterrier","sub_path":"information-retrieval/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21493462639","text":"import numpy as np\r\nimport random as rd\r\nimport scipy.sparse as sp\r\nfrom time import time\r\nimport os\r\nfrom sklearn.preprocessing import MultiLabelBinarizer\r\nfrom utils.parser import parse_args\r\nimport torch\r\n\r\n# fix seed\r\nargs1 = parse_args()\r\nseed = args1.seed\r\nprint(\"use seed:\", 
seed)\r\nrd.seed(seed)\r\nos.environ['PYTHONHASHSEED'] = str(seed)\r\nnp.random.seed(seed)\r\ntorch.manual_seed(seed)\r\ntorch.cuda.manual_seed(seed)\r\ntorch.cuda.manual_seed_all(seed)\r\ntorch.backends.cudnn.deterministic = True\r\ntorch.backends.cudnn.benchmark = False\r\n\r\nclass DataSet(object):\r\n def __init__(self, args, path, batch_size):\r\n self.path = path\r\n self.batch_size = batch_size\r\n\r\n train_file = path + '/' + args.train_file\r\n test_file = path + '/' + args.test_file\r\n\r\n sym_pair_file = path + '/symPair-5.txt'\r\n herb_pair_file = path + '/herbPair-40.txt'\r\n\r\n # get number of users and items\r\n self.n_users, self.n_sets, self.n_items = self.load_data_size(args, '{}_data_size.txt')[:3]\r\n self.n_train, self.n_test = 0, self.load_data_num(args, '{}_num.txt')[0]\r\n\r\n # herbs in train\r\n self.train_items = set()\r\n # herbs in test\r\n self.test_items = set()\r\n self.test_all_users = set()\r\n self.all_items = set()\r\n # prescriptions in train\r\n self.train_pres = list()\r\n self.max_item_len = self.load_data_size(args, '{}_item_length.txt')[0]\r\n\r\n self.R = sp.dok_matrix((self.n_users, self.n_items), dtype=np.float32)\r\n\r\n self.test_group_set = list()\r\n self.test_group_set_repeat = list() #\r\n self.test_users = np.zeros((self.n_test, self.n_users), dtype=float)\r\n self.test_items_hot = np.zeros((self.n_test, self.n_items), dtype=float)\r\n self.item_weights = np.zeros((self.n_items, 1), dtype=float)\r\n\r\n self.train_set_list = list() # herb id list [[]]\r\n\r\n self.epoch = args.epoch\r\n\r\n user_item_count = 0\r\n\r\n with open(train_file) as f:\r\n self.train_fang = dict() # key: symptom set value:all herb list\r\n self.train_symset_herbset = dict() # key: symptom set value: [[]] herb list\r\n for l in f.readlines():\r\n if len(l) > 0:\r\n temp = l.strip().split('\\t')\r\n tempS = temp[0].split(\" \")\r\n uids = [int(i) for i in tempS]\r\n tempH = temp[1].split(' ')\r\n items = [int(i) for i in tempH]\r\n self.train_symset_herbset.setdefault(str(uids), []).append(items)\r\n if str(uids) in self.train_fang.keys():\r\n self.train_fang[str(uids)] = list(set(self.train_fang[str(uids)] + items))\r\n else:\r\n self.train_fang[str(uids)] = items\r\n if items not in self.train_set_list:\r\n self.train_set_list.append(items)\r\n\r\n with open(train_file) as f:\r\n for l in f.readlines():\r\n if len(l) > 0:\r\n temp = l.strip().split('\\t')\r\n tempS = temp[0].split(\" \")\r\n tempH = temp[1].split(\" \")\r\n try:\r\n uids = [int(i) for i in tempS]\r\n items = [int(i) for i in tempH]\r\n self.train_pres.append([uids, items])\r\n for item in items:\r\n self.train_items.add(item)\r\n self.all_items.add(item)\r\n self.item_weights[item][0] += 1\r\n for user in uids:\r\n for item in items:\r\n if self.R[user, item] != 1.:\r\n self.R[user, item] = 1.\r\n user_item_count += 1\r\n except Exception:\r\n continue\r\n self.n_train += 1\r\n\r\n print('# user-item count ', user_item_count)\r\n\r\n print('item_weight ', len(self.item_weights))\r\n item_freq_max = self.item_weights.max()\r\n print('item_freq: item_weight.shape[0] and [1]', self.item_weights.shape[0], ' ',\r\n self.item_weights.shape[1])\r\n for index in range(self.item_weights.shape[0]):\r\n self.item_weights[index][0] = item_freq_max * 1.0 / self.item_weights[index][0]\r\n\r\n test_index = 0\r\n self.test_positive_list = list()\r\n with open(test_file) as f:\r\n self.test_fang = dict()\r\n index = 0\r\n fang_index = dict()\r\n for l in f.readlines():\r\n if len(l) > 0:\r\n temp = 
l.strip().split('\\t')\r\n                    tempS = temp[0]\r\n                    tempH = temp[1].split(' ')\r\n                    items = [int(i) for i in tempH]\r\n                    self.test_fang.setdefault(tempS, []).append(items)\r\n                    if str(tempS) in fang_index.keys():\r\n                        self.test_positive_list.append(fang_index[str(tempS)])\r\n                    else:\r\n                        self.test_positive_list.append(index)\r\n                        fang_index[str(tempS)] = index\r\n                        index += 1\r\n\r\n        with open(test_file) as f:\r\n            self.test_users_padding = list()\r\n            self.test_items_padding = list()\r\n            for l in f.readlines():\r\n                if len(l) > 0:\r\n                    if len(l) == 0: break\r\n                    l = l.strip('\\n')\r\n                    temp = l.strip().split('\\t')\r\n                    tempS = temp[0].split(' ')\r\n                    tempH = temp[1].split(' ')\r\n                    uids = [int(i) for i in tempS]\r\n                    try:\r\n                        for uid in uids:\r\n                            self.test_users[test_index][uid] = 1.\r\n                            self.test_all_users.add(uid)\r\n                        items = [int(i) for i in tempH]\r\n                        item_padding = [self.n_items] * (self.max_item_len - len(items))\r\n                        padding_items = items + item_padding\r\n                        self.test_items_padding.append(padding_items)\r\n                        for item in items:\r\n                            self.test_items.add(item)\r\n                            self.all_items.add(item)\r\n                            self.test_items_hot[test_index][item] = 1.  # multi-hot encoding of herbs in the test ground truth\r\n                    except Exception:\r\n                        continue\r\n                    test_index += 1\r\n                    uid, test_items = uids, items\r\n                    user_index = ''\r\n                    for user in uid:\r\n                        user_index += str(user) + \"_\"\r\n                    user_index = user_index[:-1]\r\n                    self.test_group_set.append([user_index, test_items])\r\n                    self.test_group_set_repeat.append([user_index, self.test_fang[temp[0]]])\r\n                    # self.n_test += 1\r\n        print(\"#multi-hot for test users\\t\", len(self.test_users))\r\n        print(\"#test\\t\", len(self.test_group_set))\r\n        print(\"test max item len:\\t\", self.max_item_len)\r\n        self.print_statistics()\r\n\r\n        self.sym_pair = sp.dok_matrix((self.n_users, self.n_users), dtype=np.float32)\r\n        self.herb_pair = sp.dok_matrix((self.n_items, self.n_items), dtype=np.float32)\r\n        sym_pair_count = 0\r\n        with open(sym_pair_file) as f_sym_pair:\r\n            for l in f_sym_pair.readlines():\r\n                if len(l) == 0: break\r\n                pair = l.strip().split(' ')\r\n                sym1 = int(pair[0])\r\n                sym2 = int(pair[1])\r\n                # print('sym-pair ', sym1, ' ', sym2)\r\n                self.sym_pair[sym1, sym2] = 1.\r\n                self.sym_pair[sym2, sym1] = 1.\r\n                sym_pair_count += 2\r\n\r\n        print('# sym pairs ', sym_pair_count)\r\n\r\n        herb_pair_count = 0\r\n        with open(herb_pair_file) as f_herb_pair:\r\n            for l in f_herb_pair.readlines():\r\n                if len(l) == 0: break\r\n                pair = l.strip().split(' ')\r\n                herb1 = int(pair[0])\r\n                herb2 = int(pair[1])\r\n                # print('herb ', herb1, ' ', herb2)\r\n                self.herb_pair[herb1, herb2] = 1.\r\n                self.herb_pair[herb2, herb1] = 1.\r\n                herb_pair_count += 2\r\n\r\n        print('#herb pairs ', herb_pair_count)\r\n\r\n    def load_data_size(self, args, name):\r\n        with open(os.path.join(self.path, name.format(args.dataset)), 'r') as f:\r\n            return [int(s) for s in f.readline().split('\\t')]\r\n\r\n    def load_data_num(self, args, name):\r\n        with open(os.path.join(self.path, name.format(args.test_file.replace(\".txt\", \"\"))), 'r') as f:\r\n            return [int(s) for s in f.readline().split('\\t')]\r\n\r\n    def get_adj_mat(self):\r\n        try:\r\n            t1 = time()\r\n            adj_mat = sp.load_npz(self.path + '/s_adj_mat.npz')\r\n            norm_adj_mat = sp.load_npz(self.path + '/s_norm_adj_mat.npz')\r\n            mean_adj_mat = sp.load_npz(self.path + '/s_mean_adj_mat.npz')\r\n            sym_pair_mat = sp.load_npz(self.path + '/s_sym_pair_mat.npz')\r\n            herb_pair_mat = sp.load_npz(self.path + '/s_herb_pair_mat.npz')\r\n            print('already loaded sym_pair adjacency matrix', sym_pair_mat.shape)\r\n            print('already loaded 
herb_pair adjacency matrix', herb_pair_mat.shape)\r\n            print('already loaded adj matrix', adj_mat.shape, time() - t1)\r\n        except Exception:\r\n            adj_mat, norm_adj_mat, mean_adj_mat, sym_pair_mat, herb_pair_mat = self.create_adj_mat()\r\n            sp.save_npz(self.path + '/s_adj_mat.npz', adj_mat)\r\n            sp.save_npz(self.path + '/s_norm_adj_mat.npz', norm_adj_mat)\r\n            sp.save_npz(self.path + '/s_mean_adj_mat.npz', mean_adj_mat)\r\n            sp.save_npz(self.path + '/s_sym_pair_mat.npz', sym_pair_mat)\r\n            sp.save_npz(self.path + '/s_herb_pair_mat.npz', herb_pair_mat)\r\n        return adj_mat, norm_adj_mat, mean_adj_mat, sym_pair_mat, herb_pair_mat\r\n\r\n    def create_adj_mat(self):\r\n        t1 = time()\r\n        adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)\r\n        adj_mat = adj_mat.tolil()\r\n        R = self.R.tolil()\r\n\r\n        # raw bidirectional adjacency matrix: user-item block and its transpose\r\n        adj_mat[:self.n_users, self.n_users:] = R\r\n        adj_mat[self.n_users:, :self.n_users] = R.T\r\n        adj_mat = adj_mat.todok()\r\n        print('already created adjacency matrix', adj_mat.shape, 'time:', time() - t1)\r\n        t2 = time()\r\n\r\n        sym_pair_adj_mat = self.sym_pair.tolil().todok()\r\n        print('already created sym_pair adjacency matrix', sym_pair_adj_mat.shape, 'time:', time() - t2)\r\n        t3 = time()\r\n\r\n        herb_pair_adj_mat = self.herb_pair.tolil().todok()\r\n        print('already created herb_pair adjacency matrix', herb_pair_adj_mat.shape, 'time:', time() - t3)\r\n\r\n        def normalized_adj_single(adj):\r\n            # row normalization: compute the sum of each row\r\n            rowsum = np.array(adj.sum(1))\r\n            d_inv = np.power(rowsum, -1).flatten()\r\n            d_inv[np.isinf(d_inv)] = 0.\r\n            d_mat_inv = sp.diags(d_inv)\r\n\r\n            # divide every element by its row sum\r\n            norm_adj = d_mat_inv.dot(adj)\r\n            # norm_adj = adj.dot(d_mat_inv)\r\n            print('generate single-normalized adjacency matrix.')\r\n            return norm_adj.tocoo()\r\n\r\n        def normalized_adj_bi(adj):\r\n            rowsum = np.array(adj.sum(1))\r\n            d_inv_sqrt = np.power(rowsum, -0.5).flatten()\r\n            d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\r\n            d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\r\n            bi_adj = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)\r\n            return bi_adj\r\n\r\n        norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))  # sp.eye: identity matrix (adds self-loops)\r\n        mean_adj_mat = normalized_adj_single(adj_mat)\r\n        print('already normalized adjacency matrix', time() - t2)\r\n\r\n        print('sym_pair and herb_pair get self-connections (summed with identity)!!')\r\n        sym_pair_adj_mat = sym_pair_adj_mat + sp.eye(sym_pair_adj_mat.shape[0])\r\n        herb_pair_adj_mat = herb_pair_adj_mat + sp.eye(herb_pair_adj_mat.shape[0])\r\n\r\n        return adj_mat.tocsr(), norm_adj_mat.tocsr(), mean_adj_mat.tocsr(), sym_pair_adj_mat.tocsr(), herb_pair_adj_mat.tocsr()\r\n\r\n    def sample(self, args):\r\n        self.train_items_padding = list()\r\n        self.train_users_padding = list()\r\n        self.train_positive_list = list()\r\n        fang_sample = dict()\r\n        fang_items = dict()\r\n        sample_ids = [i for i in range(len(self.train_pres))]\r\n        # max_batch_item_len = 0\r\n        if self.batch_size <= len(sample_ids):\r\n            pres_ids = rd.sample(sample_ids, self.batch_size)  # sample batch size data\r\n        else:\r\n            pres_ids = [rd.choice(sample_ids) for _ in range(self.batch_size)]\r\n\r\n        users = []\r\n        for pres_id in pres_ids:\r\n            users.append(self.train_pres[pres_id])  # users: [[[user id set], [item id set]], ...]\r\n            # if len(self.train_pres[pres_id][1]) > self.max_item_len:\r\n            #     self.max_item_len = len(self.train_pres[pres_id][1])\r\n        self.data_sample_ids = pres_ids\r\n        user_sets = np.zeros((len(users), self.n_users), dtype=float)  # multi-hot [B, n_users]\r\n        item_sets = np.zeros((len(users), 
self.n_items), dtype=float) # multi-hot [B, n_items]\r\n self.item_sets_repeat = np.zeros((len(users), self.n_items), dtype=float) # multi-hot [B, n_items]\r\n # self.item_sets_repeat_dataset = np.zeros((len(users), self.n_items), dtype=float) # multi-hot [B, n_items]\r\n user_set = set()\r\n item_set = set()\r\n for index in range(len(users)):\r\n uids = users[index][0]\r\n items = users[index][1]\r\n padding = [self.n_items] * (self.max_item_len - len(items))\r\n padding_items = items + padding\r\n self.train_items_padding.append(padding_items) #\r\n if str(uids) in fang_sample.keys():\r\n self.train_positive_list.append(fang_sample[str(uids)])\r\n else:\r\n self.train_positive_list.append(index)\r\n fang_sample[str(uids)] = index\r\n if str(uids) in fang_items.keys():\r\n fang_items[str(uids)] = list(set(fang_items[str(uids)] + items))\r\n else:\r\n fang_items[str(uids)] = items\r\n # self.train_items_len.append([1] * len(items) + [0] * (self.max_item_len - len(items)))\r\n for uid in uids:\r\n user_sets[index][int(uid)] = 1. # multi-hot\r\n user_set.add(int(uid))\r\n for item in items:\r\n item_sets[index][int(item)] = 1. # multi-hot\r\n item_set.add(int(item))\r\n herb_sets_list = list()\r\n for index in range(len(users)):\r\n uids = users[index][0]\r\n if args.all_dataset == 0:\r\n herb_list = fang_items[str(uids)]\r\n else:\r\n herb_list = self.train_fang[str(uids)]\r\n herb_sets_list.append(herb_list)\r\n self.item_sets_repeat = MultiLabelBinarizer(classes=range(0, self.n_items)).fit_transform(herb_sets_list) # [B, herb_num]\r\n self.item_sets_repeat = np.array(self.item_sets_repeat, dtype=float)\r\n return user_sets, list(user_set), item_sets, list(item_set)\r\n\r\n def print_statistics(self):\r\n print('symptom n_users=%d, herb n_items=%d' % (self.n_users, self.n_items))\r\n print('#train herb train_items %d' % (len(self.train_items)))\r\n print('#test herb test_items %d' % (len(self.test_items)))\r\n print('#test syn test_all_users %d' % (len(self.test_all_users)))\r\n print('#all herb: all_items %d' % (len(self.all_items)))\r\n print('item_max_len:\\t', self.max_item_len)\r\n print('***********************para********************************')\r\n print('lr:', args1.lr)\r\n print('regs:', str(args1.regs))\r\n print('batch_size:', str(args1.batch_size))\r\n print('seed:', args1.seed)\r\n print('step:', args1.step)\r\n print('t=?', args1.t)\r\n print('co_lamda?', args1.co_lamda)\r\n\r\n\r\n\r\n","repo_name":"ywjawmw/CLEPR","sub_path":"utils/load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":16433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"605198294","text":"#!/usr/bin/env python\n\nfrom luigi import Task, ExternalTask, Parameter, BoolParameter, IntParameter\nfrom luigi.util import inherits, requires\n\nfrom struct_pipe import Freesurfer, StructMask\n\nfrom plumbum import local\nfrom subprocess import Popen\n\nfrom scripts.util import N_PROC\n\nfrom os.path import dirname, join as pjoin\nfrom _glob import _glob\nfrom glob import glob\n\nfrom _provenance import write_provenance\n\nclass SelectFsDwiFiles(ExternalTask):\n id = Parameter()\n ses = Parameter(default='')\n bids_data_dir = Parameter()\n derivatives_dir = Parameter()\n fs_dirname = Parameter(default='freesurfer')\n dwi_template = Parameter()\n\n def output(self):\n\n derivatives_dir= self.bids_data_dir.replace('rawdata', self.derivatives_dir)\n\n _, dwi = _glob(derivatives_dir, self.dwi_template, self.id, self.ses)\n\n 
fs_dirname= pjoin(dirname(dwi).replace('/dwi','/anat'), self.fs_dirname)\n\n for suffix in ['XcUnEdEp_dwi', 'XcUnEd_dwi', 'XcUn_dwi', 'Xc_dwi']:\n if suffix in dwi:\n break\n\n\n # look for Qc'ed mask first\n # if not present, return the automated mask\n bse_mask_dict= {\n 'XcUnEdEp_dwi': ['XcUnCNNQcEdEp_mask', 'XcUnCNNEdEp_mask', 'XcUnEdEp_bse'],\n 'XcUnEd_dwi': ['XcUnCNNQc_mask', 'XcUnCNN_mask', 'XcUn_bse'],\n 'XcUn_dwi': ['XcUnCNNQc_mask', 'XcUnCNN_mask', 'XcUn_bse'],\n 'Xc_dwi': ['XcCNNQc_mask', 'XcCNN_mask', 'Xc_bse']\n }\n\n dwidir= dirname(dwi)\n try:\n t= pjoin(dwidir, '*' + bse_mask_dict[suffix][0] + '.nii.gz')\n mask= glob(t)[0]\n except IndexError:\n t= pjoin(dwidir, '*' + bse_mask_dict[suffix][1] + '.nii.gz')\n\n try:\n mask= glob(t)[0]\n except:\n raise FileNotFoundError('Neither *{}.nii.gz nor *{}.nii.gz could be found in {}'\n .format(bse_mask_dict[suffix][0], bse_mask_dict[suffix][1], dwidir))\n\n bse= glob(pjoin(dwidir, '*'+ bse_mask_dict[suffix][2]+'.nii.gz'))[0]\n\n return dict(fsdir=local.path(fs_dirname), dwi=local.path(dwi), bse=local.path(bse), mask=local.path(mask))\n\n\n@inherits(SelectFsDwiFiles,StructMask)\nclass Fs2Dwi(Task):\n\n debug= BoolParameter(default=False)\n mode= Parameter(default='direct')\n\n def requires(self):\n if self.struct_template:\n return self.clone(SelectFsDwiFiles),self.clone(StructMask)\n else:\n return self.clone(SelectFsDwiFiles),\n\n\n def run(self):\n cmd = (' ').join(['fs2dwi.py',\n '-f', self.input()[0]['fsdir'],\n '--bse', self.input()[0]['bse'],\n '--dwimask', self.input()[0]['mask'],\n '-o', self.output()[0].dirname,\n '-d' if self.debug else '',\n 'direct' if self.mode=='direct'\n else 'witht2 --t2 {} --t2mask {}'.format(self.input()[1]['aligned'], self.input()[1]['mask'])])\n p = Popen(cmd, shell=True)\n p.wait()\n\n write_provenance(self, self.output()[0])\n\n def output(self):\n\n wmparc= local.path(self.input()[0]['dwi'].dirname.join('wmparcInDwi.nii.gz').replace('dwi', 'fs2dwi'))\n return wmparc,self.input()[0]['dwi']\n\n\n@requires(Fs2Dwi)\nclass Wmql(Task):\n\n query= Parameter(default='')\n wmql_nproc= IntParameter(default= int(N_PROC))\n\n def run(self):\n # obtain the tract from dwi prefix\n tract= self.input()[1].replace('/dwi/', '/tracts/').replace('_dwi.nii.gz', '.vtk')\n\n cmd = (' ').join(['wmql.py',\n '-f', self.input()[0],\n '-i', tract,\n '-o', self.output(),\n f'-q {self.query}' if self.query else '',\n f'-n {self.wmql_nproc}' if self.wmql_nproc else ''])\n p = Popen(cmd, shell=True)\n p.wait()\n\n write_provenance(self)\n\n def output(self):\n\n return local.path(self.input()[0].dirname.replace('fs2dwi','wmql'))\n\n\n@requires(Wmql)\nclass TractMeasures(Task):\n\n exe= Parameter()\n\n def run(self):\n\n cmd = (' ').join([self.exe,\n '--inputtype Fibers_File_Folder',\n '--format Column_Hierarchy',\n '--separator Comma',\n '--inputdirectory', self.input(),\n '--outputfile', self.output()])\n p = Popen(cmd, shell=True)\n p.wait()\n\n write_provenance(self)\n\n def output(self):\n\n return local.path(self.input().replace('wmql', 'tractMeasures.csv'))\n\n\n@requires(Wmql)\nclass Wmqlqc(Task):\n\n id = Parameter()\n ses = Parameter(default='')\n\n def run(self):\n cmd = (' ').join(['wmqlqc.py',\n '-i', self.input(),\n '-s', self.id,\n '-o', self.output()])\n p = Popen(cmd, shell=True)\n p.wait()\n\n write_provenance(self)\n\n def output(self):\n\n return 
local.path(self.input().replace('wmql','wmqlqc'))\n","repo_name":"pnlbwh/luigi-pnlpipe","sub_path":"workflows/fs2dwi_pipe.py","file_name":"fs2dwi_pipe.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"3659497013","text":"\nfrom collections import defaultdict\n\nimport numpy\nimport pandas as pd\nimport torch\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\nfrom ..MY_PATHS import *\nfrom preprocess import (TensoredDataset, create_dict_of_tensor_datasets,\n create_lookups_for_vocab, create_vocab_from_tokens,\n pad_collate_fn, tokenize_dataset)\n\nSEED = 57\n\n\n# these values cannot be changed\nmonolingual_train_size = 30000\nmultilingual_train_size = 10000\nval_size = 1000\n\n\ndef get_mixed_datasets(LANGUAGES_LIST=(\"english\", \"russian\", \"hindi\"), SAVE=False, LOAD=True):\n \"\"\"\n @returns\n index_to_word, word_to_index, dict_wiki_tensor_dataset, weights_matrix_ve, classes\n \"\"\"\n LANGUAGES_DICT = defaultdict(dict)\n\n # assuming the data is in PATH_TO_DATA_FOLDER\n for language in LANGUAGES_LIST:\n language_code = language[:2]\n LANGUAGES_DICT[language][\"language_code\"] = language_code\n FILE_NAMES_DICT = {\n \"vocab\": f\"{PATH_TO_DATA_FOR_MODEL_FOLDER}vocab_all_{language_code}.pt\",\n \"monolingual_train\": f\"{PATH_TO_DATA_FOR_MODEL_FOLDER}df_wiki_monolingual_train_{monolingual_train_size}_{language_code}.pt\",\n \"multilingual_train\": f\"{PATH_TO_DATA_FOR_MODEL_FOLDER}df_wiki_multilingual_train_{multilingual_train_size}_{language_code}.pt\",\n \"val\": f\"{PATH_TO_DATA_FOR_MODEL_FOLDER}df_wiki_valid_{val_size}_{language_code}.pt\",\n \"test\": f\"{PATH_TO_DATA_FOR_MODEL_FOLDER}df_wiki_test_{language_code}.pt\",\n \"fasttext_embeddings\": f\"{PATH_TO_EMBEDDINGS_FOLDER}wiki.{language_code}.align.vec\",\n \"embed_matrix\": f'{PATH_TO_SAVED_EMBED_FOLDER}embeddings_matrix_with_idx_to_word_{language_code}.pt',\n }\n # ADD check that these files exist\n LANGUAGES_DICT[language][\"FILE_NAMES_DICT\"] = FILE_NAMES_DICT\n\n # LOAD vocab, tensor dataset, classes\n classes = torch.load(PATH_TO_DATA_FOLDER + \"45_classes_list.pt\")\n mlb = MultiLabelBinarizer(classes)\n\n for language, lang_dict in LANGUAGES_DICT.items():\n vocab = torch.load(lang_dict[\"FILE_NAMES_DICT\"][\"vocab\"])\n print(f\"{language} vocab size is:\", len(vocab))\n # LANGUAGES_DICT[language][\"vocab\"] = vocab\n LANGUAGES_DICT[language][\"index_to_word\"], LANGUAGES_DICT[language][\"word_to_index\"] =\\\n create_lookups_for_vocab(vocab)\n\n # Create combined vocab, index_to_word, word_to_index\n # 0 - , 1 - \n vocab = [\"\", \"\"]\n print(\"Order:\", LANGUAGES_DICT.keys())\n for language, lang_dict in LANGUAGES_DICT.items(): # .keys() keep same order in Python version >= 3.7\n assert lang_dict[\"index_to_word\"][0] != \"\"\n vocab += lang_dict[\"index_to_word\"]\n \n index_to_word, word_to_index = create_lookups_for_vocab(vocab)\n assert len(set(word_to_index)) == len(word_to_index)\n\n wiki_train, wiki_valid = [], []\n\n dict_of_dfs = defaultdict()\n\n for language, lang_dict in LANGUAGES_DICT.items():\n language_code = lang_dict[\"language_code\"]\n dict_of_dfs[f\"monolingual_train_{language_code}\"], dict_of_dfs[f\"multilingual_train_{language_code}\"] =\\\n (torch.load(lang_dict[\"FILE_NAMES_DICT\"][\"monolingual_train\"]),\n torch.load(lang_dict[\"FILE_NAMES_DICT\"][\"multilingual_train\"]))\n dict_of_dfs[f\"val_{language_code}\"] = torch.load(lang_dict[\"FILE_NAMES_DICT\"][\"val\"])\n 
wiki_train.append(dict_of_dfs[f\"multilingual_train_{language_code}\"])\n wiki_valid.append(dict_of_dfs[f\"val_{language_code}\"])\n\n wiki_train = pd.concat(wiki_train).sample(frac=1, random_state=SEED).reset_index(drop=True)\n wiki_valid = pd.concat(wiki_valid).sample(frac=1, random_state=SEED).reset_index(drop=True)\n # Add bilingual datasets\n wiki_train_en_ru = pd.concat([\n dict_of_dfs[f\"multilingual_train_en\"], dict_of_dfs[f\"multilingual_train_ru\"],\n ]).sample(frac=1, random_state=SEED).reset_index(drop=True)\n wiki_train_en_hi = pd.concat([\n dict_of_dfs[f\"multilingual_train_en\"], dict_of_dfs[f\"multilingual_train_hi\"],\n ]).sample(frac=1, random_state=SEED).reset_index(drop=True)\n wiki_train_ru_hi = pd.concat([\n dict_of_dfs[f\"multilingual_train_ru\"], dict_of_dfs[f\"multilingual_train_hi\"],\n ]).sample(frac=1, random_state=SEED).reset_index(drop=True)\n\n\n dict_of_dfs[\"train_en_ru\"] = wiki_train_en_ru\n dict_of_dfs[\"train_en_hi\"] = wiki_train_en_hi\n dict_of_dfs[\"train_ru_hi\"] = wiki_train_ru_hi\n dict_of_dfs[\"train\"] = wiki_train\n dict_of_dfs[\"val\"] = wiki_valid\n\n print(f\"Combined train size: {wiki_train.shape[0]} \\nCombined val size: {wiki_valid.shape[0]}\")\n # wiki_train.head()\n\n \n dict_wiki_tensor_dataset = create_dict_of_tensor_datasets(dict_of_dfs, word_to_index, max_num_tokens=None)\n\n for language, lang_dict in LANGUAGES_DICT.items():\n if LOAD:\n embed_info_dict = torch.load(lang_dict[\"FILE_NAMES_DICT\"][\"embed_matrix\"])\n LANGUAGES_DICT[language][\"weights_matrix_ve\"] = embed_info_dict[\"weights_matrix_ve\"]\n if SAVE:\n language_code = lang_dict[\"language_code\"]\n # 2.5 million\n embeddings = utils.load_vectors(lang_dict[\"FILE_NAMES_DICT\"][\"fasttext_embeddings\"])\n #Creating the weight matrix for pretrained word embeddings\n weights_matrix_ve = utils.create_embeddings_matrix(lang_dict[\"index_to_word\"], embeddings)\n LANGUAGES_DICT[language][\"weights_matrix_ve\"] = weights_matrix_ve\n # SAVE embeddings matrix together with index_to_word\n torch.save({\n \"index_to_word\" : lang_dict[\"index_to_word\"],\n \"weights_matrix_ve\" : weights_matrix_ve,\n }, lang_dict[\"FILE_NAMES_DICT\"][\"embed_matrix\"])\n print(\"Saved.\") \n\n #Creating the weight matrix for pretrained word embeddings\n # 0 - , 1 - \n weights_matrix_ve = torch.zeros(len(index_to_word), LANGUAGES_DICT[\"english\"][\"weights_matrix_ve\"].shape[1])\n start_idx = 2\n for language, lang_dict in LANGUAGES_DICT.items():\n end_idx = start_idx + len(lang_dict[\"index_to_word\"])\n assert index_to_word[start_idx:end_idx] == lang_dict[\"index_to_word\"]\n assert index_to_word[start_idx] == lang_dict[\"index_to_word\"][0]\n assert index_to_word[end_idx-1] == lang_dict[\"index_to_word\"][-1]\n weights_matrix_ve[start_idx:end_idx] = lang_dict[\"weights_matrix_ve\"]\n start_idx = end_idx\n\n print(f\"Embeddings matrix shape: {weights_matrix_ve.shape}, \\nVocab size: {len(vocab)}\")\n\n return index_to_word, word_to_index, dict_wiki_tensor_dataset, weights_matrix_ve, classes\n\n\n# class WikiData:\n# def __init__(self, languages_list, FILE_NAMES_DICT): # ADD specify FILE_NAMES_DICT in txt file\n# self.FILE_NAMES_DICT = FILE_NAMES_DICT\n# self.vocab = \n\n\n# class MultilingualWikiData:\n# def __init__(self, languages_list, FILE_NAMES_DICT):\n\n\n# class MonolingualWikiData:\n# def __init__(self, language, 
FILE_NAMES_DICT):\n","repo_name":"mmarinated/topic-modeling","sub_path":"data_creation/wiki_dataset.py","file_name":"wiki_dataset.py","file_ext":"py","file_size_in_byte":7025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11142232266","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 4 19:13:11 2022\r\n\r\n@author: Narendra_IITJ\r\n\"\"\"\r\n\"\"\" \r\n1 : car : 2754 objects\r\n2 : truck : 614 objects\r\n9 : van : 202 objects\r\n11: pickup : 1910 objects\r\n\"\"\"\r\nimport pandas as pd\r\nimport os\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\n\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.layers import AveragePooling2D\r\nfrom tensorflow.keras.layers import MaxPooling2D\r\nfrom tensorflow.keras.applications import VGG16\r\nfrom tensorflow.keras.layers import Dropout\r\nfrom tensorflow.keras.layers import Flatten\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.utils import to_categorical\r\nfrom sklearn.preprocessing import LabelBinarizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import classification_report\r\n#from imutils import paths\r\nimport matplotlib.pyplot as plt\r\nimport argparse\r\n\r\n\r\n#%%\r\nstart = time.time()\r\nAnnotations = []\r\ndirectory = 'Annotations512'\r\nfor filename in os.listdir(directory):\r\n    f = os.path.join(directory, filename)\r\n    df = pd.read_csv(f,header = None,sep = ' ')\r\n    locations = []\r\n    for i in range(len(df)):\r\n        n1 = int(df.iloc[i,0])\r\n        n2 = int(df.iloc[i,1])\r\n        n11 = max(n1-10,0)\r\n        n12 = min(n1+10,511)\r\n        n21 = max(n2-10,0)\r\n        n22 = min(n2+10,511)\r\n        label = int(df.iloc[i,3])\r\n        locations.append([[n11, n12, n21, n22],[label]])\r\n    \r\n    Annotations.append(locations)\r\n    Annotations.append(locations)\r\n    \r\nend = time.time()\r\nprint(\"process took\",end-start,\"seconds\")\r\n \r\n#%%\r\nstart = time.time()\r\ndirectory = 'Vehicules512'\r\nINPUT_SIZE = (64, 64)\r\nindex = 0\r\ndata = []\r\nlabels = []\r\ncar_pickup_annotations = [] #startX, startY, endX, endY\r\nfor filename in os.listdir(directory):\r\n    f = os.path.join(directory, filename)\r\n    orig = cv2.imread(f)\r\n    localAnnotations = []\r\n    for annotation in Annotations[index]:\r\n        label = annotation[1][0]\r\n        if(label == 1 or label == 11):\r\n            roiOrig = orig[annotation[0][2]:annotation[0][3], annotation[0][0]:annotation[0][1]]\r\n            roi = cv2.cvtColor(roiOrig, cv2.COLOR_BGR2RGB)\r\n            roi = cv2.resize(roi, INPUT_SIZE)\r\n            localAnnotations.append(annotation[0])\r\n            data.append(roi)\r\n            if(index%2 == 0):\r\n                labels.append([label,1]) #color image\r\n            else:\r\n                labels.append([label,0]) #grey image\r\n    index += 1\r\n    car_pickup_annotations.append(localAnnotations)\r\n    \r\nend = time.time()\r\nprint(\"process took\",end-start,\"seconds\")\r\n\r\n\r\n#%%\r\n\r\ncar = 0\r\ntruck = 0\r\nvan = 0\r\npickup = 0\r\n# each entry of labels is [class_label, is_color_flag], so unpack the class label before comparing\r\nfor label, _ in labels:\r\n    \r\n    if(label == 1): \r\n        car+=1\r\n    if(label == 2): \r\n        truck +=1\r\n    if(label == 9): \r\n        van += 1\r\n    if(label == 11): \r\n        pickup += 1\r\n\r\n    \r\nprint(\"car = \",car)\r\nprint(\"truck = \",truck)\r\nprint(\"van = \",van)\r\nprint(\"pickup = \",pickup)\r\n\r\n\r\n#%%\r\n\r\nap = argparse.ArgumentParser()\r\n#ap.add_argument(\"-d\", \"--dataset\", required=True,\r\n#\thelp=\"path to input dataset\")\r\nap.add_argument(\"-e\", 
\"--epochs\", type=int, default=10,\r\n\thelp=\"# of epochs to train our network for\")\r\nap.add_argument(\"-p\", \"--plot\", type=str, default=\"plot.png\",\r\n\thelp=\"path to output loss/accuracy plot\")\r\nargs = vars(ap.parse_args())\r\n\r\n#%%\r\n\r\ndata = np.array(data)\r\nlabels = np.array(labels)\r\nIsColor = labels[:,1]\r\nlabels = labels[:,0]\r\n\r\nlb = LabelBinarizer()\r\nlabels = lb.fit_transform(labels)\r\nlabels = to_categorical(labels)\r\nlabels = np.c_[labels,IsColor]\r\n(trainX, testX, trainY, testY) = train_test_split(data, labels,\r\n\ttest_size=0.25, random_state=42) #stratify=labels,\r\n\r\ntrainIsColor = trainY[:,2]\r\ntrainY = trainY[:,:2]\r\n\r\ntestIsColor = testY[:,2]\r\ntestY = testY[:,:2]\r\n#%%\r\nplt.imshow(data[3014])\r\nplt.show()\r\n#%%\r\n\r\nstart = time.time()\r\ntrainAug = ImageDataGenerator(\r\n\trotation_range=30,\r\n\tzoom_range=0.15,\r\n\twidth_shift_range=0.2,\r\n\theight_shift_range=0.2,\r\n\tshear_range=0.15,\r\n\thorizontal_flip=True,\r\n\tfill_mode=\"nearest\")\r\n\r\nvalAug = ImageDataGenerator()\r\n\r\nmean = np.array([123.68, 116.779, 103.939], dtype=\"float32\")\r\ntrainAug.mean = mean\r\nvalAug.mean = mean\r\n\r\nend = time.time()\r\nprint(\"process took\",end-start,\"seconds\")\r\n\r\n\r\n#%%\r\nstart = time.time()\r\nOrigVGGModel = VGG16()\r\nprint(\"################### Origional VGG16 model summary #######################\")\r\nprint()\r\nprint(OrigVGGModel.summary())\r\nprint()\r\n\r\nbaseModel = VGG16(weights=\"imagenet\", include_top=False,\r\n\tinput_tensor=Input(shape=(64, 64, 3)))\r\n\r\nprint(\"######################## Base Model summary ###################################\")\r\nprint()\r\nprint(baseModel.summary())\r\nprint()\r\n\r\n\r\nheadModel = baseModel.layers[-3].output\r\nheadModel = AveragePooling2D(pool_size=(2, 2))(headModel)\r\n#headModel = MaxPooling2D(pool_size=(2, 2))(headModel)\r\nheadModel = Flatten(name=\"flatten\")(headModel)\r\nheadModel = Dense(64, activation=\"relu\")(headModel)\r\n#headModel = Dense(64, activation=\"relu\")(headModel)\r\nheadModel = Dropout(0.5)(headModel)\r\nheadModel = Dense(2, activation=\"softmax\")(headModel)\r\n\r\n#print(headModel.summary())\r\nmodel = Model(inputs=baseModel.input, outputs=headModel)\r\n\r\nprint(\"######################## Main Model summary ###################################\")\r\nprint()\r\nprint(model.summary())\r\n\r\nfor layer in baseModel.layers:\r\n\tlayer.trainable = False\r\n \r\nopt = Adam(lr=1e-5)\r\n\r\nmodel.compile(loss= \"binary_crossentropy\", optimizer=opt,\r\n\tmetrics=[\"accuracy\"])\r\n\r\nend = time.time()\r\nprint(\"process took\",end-start,\"seconds\")\r\n#%%\r\nstart = time.time()\r\n\r\nH = model.fit(\r\n\tx=trainAug.flow(trainX, trainY, batch_size=32),\r\n\tsteps_per_epoch=len(trainX) // 32,\r\n\tvalidation_data=valAug.flow(testX, testY),\r\n\tvalidation_steps=len(testX) // 32,\r\n\tepochs=args[\"epochs\"])\r\n\r\n\r\n\r\nend = time.time()\r\nprint(\"process took\",end-start,\"seconds\")\r\n\r\n#%%\r\n\r\nprint(\"saving the model: \")\r\nstart = time.time()\r\n#model and weights will be saved in current directory. 
you can also save them to a specific location.\r\nmodel.save('Name_of_your_model.h5')\r\nmodel.save_weights('Name_of_weights')\r\n\r\nend = time.time()\r\nprint(\"process took\",end-start,\"seconds\")\r\n\r\n#%%\r\nstart = time.time()\r\npredictions = model.predict(x=testX.astype(\"float32\"), batch_size=32)\r\n\r\nend = time.time()\r\nprint(\"process took\",end-start,\"seconds\")\r\n#%%\r\nprint(\"classification report\")\r\nprint(classification_report(testY.argmax(axis=1),\r\n\tpredictions.argmax(axis=1),target_names=['car','pickup']))\r\n\r\nprint()\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\npredict = predictions.argmax(axis = 1)\r\npredict_color = predict[testIsColor == 1]\r\npredict_grey = predict[testIsColor == 0]\r\n\r\ny_true = testY.argmax(axis = 1)\r\ny_true_color = y_true[testIsColor == 1]\r\ny_true_grey = y_true[testIsColor == 0]\r\n\r\nprint(\"############## Combined ###############\")\r\nprint(\"confusion matrix\")\r\nprint(confusion_matrix(y_true, predict))\r\n\r\nprint()\r\nprint(\"accuracy: \",accuracy_score(y_true, predict))\r\nprint()\r\n\r\n\r\nprint(\"############## Color ###############\")\r\nprint(\"confusion matrix\")\r\nprint(confusion_matrix(y_true_color, predict_color))\r\n\r\nprint()\r\nprint(\"accuracy: \",accuracy_score(y_true_color, predict_color))\r\nprint()\r\n\r\n\r\nprint(\"############## grey ###############\")\r\nprint(\"confusion matrix\")\r\nprint(confusion_matrix(y_true_grey, predict_grey))\r\n\r\nprint()\r\nprint(\"accuracy: \",accuracy_score(y_true_grey, predict_grey))\r\n#%%\r\ndirectory = 'Vehicules512'\r\nfilename = '00000044_co.png'\r\nf = os.path.join(directory, filename)\r\nimg = cv2.imread(f)\r\nclone = img.copy()\r\nINPUT_SIZE = (64, 64)\r\nDataImg = []\r\nfor annotation in car_pickup_annotations[76]:\r\n    roiOrig = img[annotation[2]:annotation[3], annotation[0]:annotation[1]]\r\n    roi = cv2.cvtColor(roiOrig, cv2.COLOR_BGR2RGB)\r\n    roi = cv2.resize(roi, (64,64))\r\n    DataImg.append(roi)\r\n    \r\nDataImg = np.array(DataImg)\r\npred = model.predict(x = DataImg.astype(\"float32\"),batch_size=32)\r\npred = pred.argmax(axis = 1)\r\n\r\n    \r\n#%%\r\n\r\n#%%\r\ni = 0\r\nfor annotation in car_pickup_annotations[76]:\r\n    predLabel = 'car'\r\n    roiOrig = img[annotation[2]:annotation[3], annotation[0]:annotation[1]]\r\n    roi = cv2.cvtColor(roiOrig, cv2.COLOR_BGR2RGB)\r\n    roi = np.array(cv2.resize(roi, (64,64)))\r\n    print(roi.shape)\r\n    plt.imshow(roi)\r\n    plt.show()\r\n    \r\n    predI = pred[i]\r\n    if(predI == 0):\r\n        predLabel = 'car'\r\n    else:\r\n        predLabel = 'pickup'\r\n    \r\n    startX = annotation[0]\r\n    startY = annotation[2]\r\n    endX = annotation[1]\r\n    endY = annotation[3]\r\n    i = i + 1\r\n    \r\n    cv2.rectangle(clone, (startX, startY), (endX, endY),(0, 255, 0), 2)\r\n    if(startY -10>10):\r\n        y = startY - 10\r\n    else:\r\n        y = startY + 10\r\n    cv2.putText(clone, predLabel, (startX, y),cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)\r\n    plt.imshow(clone)\r\n\r\n    \r\n    \r\n\r\n\r\n\r\n#%%\r\nN = args[\"epochs\"]\r\nplt.style.use(\"ggplot\")\r\nplt.figure()\r\nplt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\r\nplt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\")\r\nplt.title(\"Training Loss on Dataset\")\r\nplt.xlabel(\"Epoch #\")\r\nplt.ylabel(\"Loss\")\r\nplt.legend(loc=\"upper right\")\r\n#plt.savefig(args[\"plot\"])\r\n\r\nN = args[\"epochs\"]\r\nplt.style.use(\"ggplot\")\r\nplt.figure()\r\nplt.plot(np.arange(0, N), H.history[\"accuracy\"], label=\"train_acc\")\r\nplt.plot(np.arange(0, N), 
H.history[\"val_accuracy\"], label=\"val_acc\")\r\nplt.title(\"Training Accuracy on Dataset\")\r\nplt.xlabel(\"Epoch #\")\r\nplt.ylabel(\"Accuracy\")\r\nplt.legend(loc=\"upper right\")\r\n#plt.savefig(args[\"plot\"])\r\n\r\n","repo_name":"Narendra-027/Vehicle-detection-in-aerial-imagery","sub_path":"model_create_train.py","file_name":"model_create_train.py","file_ext":"py","file_size_in_byte":9638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34287100033","text":"import sys\n\nsys.path.append(\"./\")\nsys.path.append(\"../\")\n\nimport json\nfrom script_utils import (\n create_deploy_command,\n run_command,\n write_result_to_storage,\n owner_account,\n)\n\nfrom contracts.lib.openzeppelin.tests.utils import str_to_felt, to_uint, felt_to_str\n\nowner = json.load(open(owner_account()))[\"address\"]\n\nrouter_deploy = create_deploy_command(\"optio_controller\", [owner])\nrouter_address, rtx = run_command(router_deploy)\n\npool_deploy = create_deploy_command(\n \"optio_pool\",\n [\n router_address,\n str_to_felt(\"OPTIO\"),\n str_to_felt(\"OPT\"),\n *to_uint(1000),\n owner,\n ],\n)\n\npool_address, ptx = run_command(pool_deploy)\n\n\n# @dev ERC20 deployment\ncmd = create_deploy_command(\n \"ERC20_Mintable\",\n [\n str_to_felt(\"testUSDC\"),\n str_to_felt(\"TUSDC\"),\n *to_uint(100000),\n pool_address,\n owner,\n ]\n)\na, t = run_command(cmd)\nwrite_result_to_storage(\n {\n \"name\": \"testUSDC\",\n \"symbol\": \"TUSDC\",\n \"address\": a,\n \"transaction\": t,\n },\n \"erc20\"\n)\n\n\n# @dev ERC721 deployment\ncmd = create_deploy_command(\n \"ERC721_Mintable_Burnable\",\n [\n str_to_felt(\"Optio token\"),\n str_to_felt(\"OPTI\"),\n owner,\n ]\n)\na, t = run_command(cmd)\nwrite_result_to_storage(\n {\n \"name\": \"Optio token\",\n \"symbol\": \"OPTI\",\n \"address\": a,\n \"transaction\": t,\n },\n \"erc721\"\n)\n\n\npool_router_dict = {\n \"CONTROLLER\": {\"address\": router_address, \"transaction\": rtx},\n \"POOL\": {\"address\": pool_address, \"transaction\": ptx},\n}\nwrite_result_to_storage(pool_router_dict, \"current_deployment_info\")\n","repo_name":"Optio-Finance/spark-boilerplate","sub_path":"scripts/python/02_deploy.py","file_name":"02_deploy.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13783857996","text":"\"\"\"\nBase_64 Utility Functions\n================\n\nFunctions used to encode or decode string data\nor to encrypt or decrypt password\nIt uses cryptography package to perform encryption/decryption.\n\"\"\"\n\nimport base64\nimport logging\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.DEBUG)\n\ndef encodestring(text):\n \"\"\"\n Encodes string\n\n :Parameters:\n 1. text = string to be encoded.\n\n :Return: encoded string\n \"\"\"\n #required to be bytes\n encoded = base64.b64encode(_string2bytes(text))\n LOGGER.debug(text)\n LOGGER.debug(encoded)\n return _string2utf8(encoded)\n\ndef decodestring(text):\n \"\"\"\n Decodes string\n\n :Parameters:\n 1. text = string to be decoded.\n\n :Return: decoded string\n \"\"\"\n #required to be bytes\n decoded = base64.b64decode(_string2bytes(text))\n LOGGER.debug(text)\n LOGGER.debug(decoded)\n return _string2utf8(decoded)\n\ndef _string2utf8(data):\n \"\"\"\n Helper function to convert data to uft-8 string\n\n :Parameters:\n 1. 
data = data to convert\n\n :Return: (string)\n \"\"\"\n try:\n return data.decode(\"utf-8\")\n except AttributeError:\n return data\n\ndef _string2bytes(data):\n \"\"\"\n Helper function to convert data to bytes\n\n :Parameters:\n 1. data = data to convert\n\n :Return: (bytes)\n \"\"\"\n try:\n return data.encode()\n except AttributeError:\n return data\n\nif __name__ == \"__main__\":\n string = \"encode this string\"\n encoded = encodestring(string)\n print(encoded)\n print(decodestring(encoded))\n\n pwd = \"encode this password\"\n encrypted = encodestring(pwd)\n print(encrypted)\n print(\"\\n\")\n print(decodestring(encrypted))","repo_name":"TrellixVulnTeam/CloudAutomation_KDSZ","sub_path":"venv/Lib/site-packages/lxk_testlib/utils/base_64.py","file_name":"base_64.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40235701384","text":"\"\"\"Database singlethon class\"\"\"\nimport os\nfrom sqlmodel import create_engine, Session, select\nfrom config import settings\n\n\nclass Singlethon:\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = super().__new__(cls, *args, **kwargs)\n\n return cls._instance\n\n\nclass DatabaseEngine(Singlethon):\n \"\"\"Database Engine.\"\"\"\n def __init__(self):\n sql_file_path = os.path.join(settings.DATABASE.FOLDER_PATH, settings.DATABASE.NAME)\n sqlite_url = f\"sqlite:///{sql_file_path}\"\n\n self.engine = create_engine(sqlite_url, echo=False)\n\n def get(self, statement, first: bool):\n \"\"\"Get elements of the sql statement.\"\"\"\n with Session(self.engine) as sess:\n results = sess.exec(\n statement=statement\n ).all()\n\n return results[0] if first else results\n\n def add(self, obj, batch: bool = False):\n \"\"\"Add object.\"\"\"\n if not batch:\n with Session(self.engine) as sess:\n sess.add(obj)\n sess.commit()\n sess.refresh(obj)\n\n def delete(self, obj, batch: bool = False) -> bool:\n \"\"\"Delete object.\"\"\"\n if not batch:\n with Session(self.engine) as sess:\n if not obj:\n return False\n\n sess.delete(obj)\n sess.commit()\n\n return True\n","repo_name":"uysalserkan/url-shorter","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16731576136","text":"from collections import Counter\n\nwith open('d11_input.txt') as f:\n puzzle_input = f.read().strip().split(',')\n\n\ndef simplify_path(path):\n\n # looks like the order matters here; need to cancel out the 0-dist pairs first\n # would need collections.OrderedDict if this wasn't Python 3.6\n reduce_table = {\n ('n', 's'): '',\n ('nw', 'se'): '',\n ('ne', 'sw'): '',\n ('n', 'se'): 'ne',\n ('ne', 's'): 'se',\n ('se', 'sw'): 's',\n ('s', 'nw'): 'sw',\n ('sw', 'n'): 'nw',\n ('nw', 'ne'): 'n',\n }\n\n step_counts = Counter(path)\n for direction_pair, direction_replacement in reduce_table.items():\n replace_count = min(step_counts[direction] for direction in direction_pair)\n for direction in direction_pair:\n step_counts[direction] -= replace_count\n if direction_replacement:\n step_counts[direction_replacement] += replace_count\n\n return step_counts\n\n\ndef calculate_distance(path):\n pos = Counter()\n furthest = 0\n for step in path:\n pos.update([step])\n pos = simplify_path(pos)\n furthest = max(sum(pos.values()), furthest)\n return 
furthest\n\n\nprint(sum(simplify_path('ne,ne,ne'.split(',')).values()))\nprint(sum(simplify_path('ne,ne,sw,sw'.split(',')).values()))\nprint(sum(simplify_path('ne,ne,s,s'.split(',')).values()))\nprint(sum(simplify_path('ne,ne,ne'.split(',')).values()))\nprint(sum(simplify_path('se,sw,se,sw,sw'.split(',')).values()))\nprint(sum(simplify_path(puzzle_input).values()))\n\nprint(calculate_distance(puzzle_input))\n","repo_name":"sclarke/adventofcode2017","sub_path":"d11.py","file_name":"d11.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"26707438529","text":"# -- coding: utf-8 --\nfrom f110_gym.envs.f110_env import F110Env\nfrom gym import spaces\n\nimport time\nimport yaml\nimport gym\nimport numpy as np\nfrom argparse import Namespace\nimport torch\n\nfrom numba import njit\nfrom pyglet.gl import GL_POINTS\n\nfrom utils import path_filler, render_callback\nfrom torch.distributions import Categorical\nimport matplotlib.pyplot as plt\nimport random\nfrom math import pi\nfrom common.policys import GapFollowPolicy, RandomPolicy\nimport cv2\nfrom time import strftime, gmtime\nimport json\nimport os\n\n\ndef create_f110env(**kargs):\n # load simulator config file(define map, waypoints, etc)\n with open(kargs['sim_cfg_file']) as file:\n conf_dict = yaml.load(file, Loader=yaml.FullLoader)\n sim_cfg = Namespace(**conf_dict)\n\n # choose continuous/discrete action space\n if kargs['continuous_action']:\n env = F110Env_Continuous_Action(sim_cfg=sim_cfg, **kargs)\n else:\n env = F110Env_Discrete_Action(sim_cfg=sim_cfg, **kargs)\n\n # render option\n if kargs['render_env']:\n env.f110.add_render_callback(render_callback)\n return env\n\n\ndef create_dictObs_eval_env(cfg=None):\n env_cfg = json.load(open(os.path.join(path_filler('config'), 'rlf110_env_cfg.json')))\n env_cfg['dictObs'] = True\n env_cfg['render_env'] = True\n env_cfg['obs_shape'] = 108\n env = create_f110env(**env_cfg)\n return env\n\n\nclass Waypoints_Manager:\n def __init__(self, wp_path=None, save_wp=False, load_wp=False) -> None:\n self.wp_path = wp_path\n self.wp = [] # (k, n)\n self.saveWp = save_wp\n self.loadWp = load_wp\n if self.saveWp:\n # self.log = open(strftime('./wp-%Y-%m-%d-%H-%M-%S',gmtime())+'.csv', 'w')\n self.log = open('./new_wp.csv', 'w')\n if self.loadWp:\n self.load_wp()\n\n def load_wp(self):\n with open(self.wp_path, encoding='utf-8') as f:\n self.waypoints = np.loadtxt(f, delimiter=',')\n # import ipdb; ipdb.set_trace()\n self.waypoints_xytheta = \\\n np.vstack([self.waypoints[:, 0], self.waypoints[:, 1], self.waypoints[:, 2]])\n self.wp = self.waypoints_xytheta # (3, n)\n\n def draw_wp(self):\n plt.plot(self.wp[0], self.wp[1], '-ro', markersize=0.1)\n plt.show()\n\n def get_nearest_wp(self, cur_position):\n # position: (x, y)\n wp_xyaxis = self.wp[:2] # (2, n)\n dist = np.linalg.norm(wp_xyaxis - cur_position.reshape(2, 1), axis=0)\n nearst_idx = np.argmin(dist)\n nearst_point = wp_xyaxis[:, nearst_idx]\n return nearst_point\n\n def get_wpbased_error(self, raw_obs, ego_idx=0):\n # pose: (x, y, yaw)\n x, y, theta = raw_obs['poses_x'][ego_idx], raw_obs['poses_y'][ego_idx], raw_obs['poses_theta'][ego_idx]\n pose = np.array([x, y, theta])\n nearstP = self.get_nearest_wp(pose[:2])\n\n euler_error = np.linalg.norm(nearstP-pose[:2])\n # return euler_error\n\n yaw = pose[2]\n local2global = np.array([[np.cos(yaw), -np.sin(yaw), 0, pose[0]],\n [np.sin(yaw), np.cos(yaw), 0, pose[1]],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n 
# wp_xyaxis = self.wp[:2]\n global2local = np.linalg.inv(local2global)\n nearstP_local = global2local @ np.array([nearstP[0], nearstP[1], 0, 1])\n lateral_error = nearstP_local[1]\n return abs(lateral_error)\n\n def save_wp(self, raw_obs, ego_idx=0):\n x, y, theta = raw_obs['poses_x'][ego_idx], raw_obs['poses_y'][ego_idx], raw_obs['poses_theta'][ego_idx]\n self.log.write('%f, %f, %f\\n' % (x, y, theta))\n\n\nclass Lidar_Manager:\n def __init__(self, scan_dim=108, window_H=250, scanScale=10, xy_range=30):\n \"\"\"\n Include the code of displays the Lidar scan and reconstruction using cv2\n \"\"\"\n window_W = scan_dim + window_H\n self.obs_gap = int(1080/scan_dim)\n self.scan_dim = scan_dim\n self.dimension = scan_dim\n self.map_size = window_H\n self.map = np.zeros((self.map_size, self.map_size))\n\n self.lidar_dmin = 0\n self.lidar_dmax = 30\n self.angle_min = -135\n self.angle_max = 135\n self.x_min, self.x_max = -xy_range, xy_range\n self.y_min, self.y_max = -xy_range, xy_range\n self.resolution = (2*xy_range) / (window_H-1)\n print(f'lidar_resolution{self.resolution}')\n self.lidar_angles = np.linspace(self.angle_min, self.angle_max, self.dimension) * np.pi / 180\n self.interpolate_or_not = False\n\n # windows\n self.scanScale = scanScale\n self.window_H = window_H\n self.window_W = window_W\n self.lidar_scanPic = np.zeros((self.window_H, 1080, 3), np.uint8)\n self.lidar_reconstructPic = np.zeros((self.map_size, self.map_size, 3), np.uint8)\n # import ipdb;ipdb.set_trace()\n self.lidar_window = np.hstack([self.lidar_scanPic, self.lidar_reconstructPic])\n print(f'lidar_window shape{self.lidar_window.shape}')\n\n def rays2world(self, distance):\n # convert lidar scan distance to 2d locations in space\n x = distance * np.cos(self.lidar_angles)\n y = distance * np.sin(self.lidar_angles)\n return x, y\n\n def grid_cell_from_xy(self, x, y):\n # convert 2d locations in space to 2d array coordinates\n x = np.clip(x, self.x_min, self.x_max)\n y = np.clip(y, self.y_min, self.y_max)\n\n cell_indices = np.zeros((2, x.shape[0]), dtype='int')\n cell_indices[0, :] = np.floor((x - self.x_min) / self.resolution)\n cell_indices[1, :] = np.floor((y - self.y_min) / self.resolution)\n return cell_indices\n\n def interpolate(self, cell_indices):\n for i in range(cell_indices.shape[1] - 1):\n fill_x = np.linspace(cell_indices[1, i], cell_indices[1, i + 1], endpoint=False, dtype='int')\n fill_y = np.linspace(cell_indices[0, i], cell_indices[0, i + 1], endpoint=False, dtype='int')\n self.map[fill_x, fill_y] = 1\n\n def update_scan2map(self, lidar_1d, color='r'):\n if color == 'r':\n color_tuple = (0, 0, 255)\n elif color == 'b':\n color_tuple = (255, 0, 0)\n\n self.map = np.zeros((self.map_size, self.map_size))\n self.lidar_reconstructPic = np.zeros((self.map_size, self.map_size, 3), np.uint8)\n\n self.distance = lidar_1d\n self.dimension = len(self.distance)\n\n x, y = self.rays2world(self.distance)\n cell_indices = self.grid_cell_from_xy(x, y)\n self.map[cell_indices[1, :], cell_indices[0, :]] = 1\n\n if self.interpolate_or_not:\n self.interpolate(cell_indices[:, :])\n\n cell_indices_line = np.vstack([cell_indices[0, :], cell_indices[1, :]]).T\n cv2.polylines(self.lidar_reconstructPic, [cell_indices_line], False, color_tuple, 2)\n return self.lidar_reconstructPic\n # self.lidar_reconstructPic[:, :, 2][np.nonzero(self.map)[0], np.nonzero(self.map)[1]] = 200\n # import ipdb; ipdb.set_trace()\n\n # plt.imshow(self.map)\n # plt.show()\n\n def update_scan(self, best_p_idx=None, scan=None):\n self.lidar_scanPic = 
np.zeros((self.window_H, self.scan_dim, 3), np.uint8)\n scan = (scan * self.scanScale).astype(np.int64)\n scan = np.vstack([np.arange(len(scan)).astype(np.int64), self.window_H - scan]).T\n\n cv2.polylines(self.lidar_scanPic, [scan], False, (0, 0, 255), 2)\n if best_p_idx:\n target = scan[max(0, best_p_idx - 1):best_p_idx + 1, :]\n cv2.polylines(self.lidar_scanPic, [target], False, (0, 255, 0), 2)\n return self.lidar_scanPic\n\n def update_lidar_windows(self, wait=1, obs=None, target_idx=None):\n # if 'scans' in obs.keys():\n scan = obs['scans']\n scan = scan[::self.obs_gap]\n # scan = obs['scans'][0]\n self.update_scan2map(scan)\n self.update_scan(best_p_idx=target_idx, scan=scan)\n self.lidar_window = np.hstack([self.lidar_scanPic, self.lidar_reconstructPic])\n cv2.imshow('debug', self.lidar_window)\n cv2.waitKey(wait)\n\n\nclass F110Env_RL:\n def __init__(self, continuous_action=True, sim_cfg=None, **kargs) -> None:\n\n for key, value in kargs.items():\n setattr(self, key, value)\n\n self.f110 = F110Env(map=sim_cfg.map_path, map_ext=sim_cfg.map_ext, num_agents=1)\n self.conf = sim_cfg\n self.observation_space = spaces.Box(low=0, high=1000, shape=(self.obs_shape, 1))\n self.observation_gap = 1080 // self.obs_shape\n\n # waypoints Manager, for lateral error\n self.wpManager = Waypoints_Manager(sim_cfg.wpt_path, save_wp=False, load_wp=True)\n self.wpManager.load_wp()\n self.waypoints_xytheta = self.wpManager.wp.T\n self.lateral_error_thres = 0.2\n\n # lidar\n if self.display_lidar:\n self.lidarManager = Lidar_Manager(scan_dim=self.obs_shape)\n\n # for offline data storage\n self.action_size = self.action_space.n if hasattr(self.action_space, 'n') else self.action_space.shape[0]\n # self._max_episode_steps = time_limit\n\n self.episode = []\n\n def reset(self):\n starting_idx = random.sample(range(len(self.waypoints_xytheta)), 1)\n # print(self.waypoints_xytheta[starting_idx])\n x, y = self.waypoints_xytheta[starting_idx][0, 0], self.waypoints_xytheta[starting_idx][\n 0, 1] # because self.waypoints_xytheta[starting_idx] has shape(1,3)\n # theta = 2*random.random() - 1\n theta_noise = (2*random.random() - 1) * 0.2\n theta = self.waypoints_xytheta[starting_idx][0, 2] + theta_noise\n starting_pos = np.array([[x, y, theta]])\n # starting_pos[-1] += 0.5\n raw_obs, _, _, _ = self.f110.reset(starting_pos)\n # raw_obs, _, _, _ = self.f110.reset(np.array([[self.conf.sx, self.conf.sy, self.conf.stheta]]))\n obs = self.get_obs(raw_obs)\n\n # dict\n if self.dictObs:\n obs['action'] = np.zeros(self.action_size)\n obs['reward'] = np.array(0.0)\n obs['terminal'] = np.array(False)\n obs['reset'] = np.array(True)\n obs['scans'] = raw_obs['scans'][0]\n # obs['raw_obs'] = raw_obs\n self.episode = [obs.copy()]\n\n # time limit\n self.step_ = 0\n\n return obs\n\n def get_obs(self, raw_obs: dict):\n obs = raw_obs['scans'][0][::self.observation_gap]\n # print(self.observation_gap)\n if self.dictObs:\n if len(obs.shape) == 1:\n return {'vecobs': obs} # Vector env\n else:\n return {'image': obs} # Image env\n else:\n return obs\n\n def get_reward(self, raw_obs: dict, crash: bool):\n # if 'raw_obs' in raw_obs.keys():\n # raw_obs = raw_obs['raw_obs']\n wp_based_error = self.wpManager.get_wpbased_error(raw_obs)\n\n if crash:\n reward = -0.05\n elif wp_based_error > self.lateral_error_thres:\n # print(f'lateral_error:{wp_based_error}')\n reward = 0.0\n # reward -= wp_based_error * 0.01\n else:\n reward = 0.02\n return reward\n\n def render(self, mode='human'):\n self.f110.render(mode)\n\n def close(self):\n 
self.f110.close()\n\n def get_action(self):\n raise NotImplementedError\n\n def step(self, action):\n raise NotImplementedError\n\n\nclass F110Env_Discrete_Action(F110Env_RL):\n def __init__(self, continuous_action=True, sim_cfg=None, **kargs):\n self.action_space = spaces.Discrete(3)\n super().__init__(continuous_action, sim_cfg, **kargs)\n # steer, speed\n for key, value in kargs.items():\n setattr(self, key, value)\n\n self.f110_action = np.array([\n # go straight\n [1, self.speed], # go left\n [-1, self.speed], # go right\n [0, self.speed],\n ])\n\n def get_action(self, action_idx: int) -> np.ndarray:\n return self.f110_action[action_idx].reshape(1, -1)\n\n def step(self, action):\n # action for dict\n if self.dictObs:\n if isinstance(action, int):\n action_vec = np.zeros(self.action_size)\n action_vec[action] = 1.0\n else:\n assert isinstance(action, np.ndarray) and action.shape == (\n self.action_size,), \"Wrong one-hot action shape\"\n action_vec = action\n\n action = self.get_action(action)\n raw_obs, reward, done, info = self.f110.step(action)\n info = {}\n obs = self.get_obs(raw_obs)\n reward = self.get_reward(raw_obs, done)\n\n # time_limit\n if self.limited_time:\n self.step_ += 1\n if self.step_ >= self.env_time_limit:\n done = True\n info['time_limit'] = True\n\n ## info\n if self.dictObs:\n obs['action'] = action_vec\n obs['reward'] = np.array(reward)\n obs['terminal'] = np.array(False if self.no_terminal else done)\n obs['reset'] = np.array(False)\n obs['scans'] = raw_obs['scans'][0]\n\n self.episode.append(obs.copy())\n if done:\n episode = {k: np.array([t[k] for t in self.episode]) for k in self.episode[0]}\n info['episode'] = episode\n\n return obs, reward, done, info\n # return obs, reward, done, info\n\n\n########### Bowen Jiang, Added on Apr 17, 2022\nclass F110Env_Continuous_Action(F110Env_RL):\n def __init__(self, continuous_action=True, sim_cfg=None, **kargs):\n self.action_space = spaces.Box(low=-1, high=1, shape=(1,))\n super().__init__(continuous_action, sim_cfg, **kargs)\n # steer, speed\n for key, value in kargs.items():\n setattr(self, key, value)\n self.action_size = self.action_space.shape[0]\n\n def get_action(self, action) -> np.ndarray:\n # if type(action) != int:\n # return action.reshape(1, -1)\n # try:\n if type(action) == np.ndarray:\n action = action[0]\n # except:\n # print('no valid steer')\n # print(action)\n # action = 0.0\n steer = np.clip(action, a_min=-1, a_max=1)\n # import ipdb; ipdb.set_trace()\n # steer = np.clip(action, a_min=-1, a_max=1)\n action = np.array([steer, self.speed]).astype(np.float32)\n return action.reshape(1, -1)\n\n def step(self, action):\n exe_action = self.get_action(action)\n # import ipdb\n # ipdb.set_trace()\n # print(action)\n # done = False\n raw_obs, reward, done, info = self.f110.step(exe_action)\n info = {}\n ## make 2 step with the same action\n # step = 3\n # while not done and step > 0:\n # raw_obs, reward, done, info = self.f110.step(action)\n # step -= 1\n ## give penalty for hitting the wall\n obs = self.get_obs(raw_obs)\n reward = self.get_reward(raw_obs, done)\n\n # time_limit\n if self.limited_time:\n self.step_ += 1\n if self.step_ >= self.env_time_limit:\n done = True\n info['time_limit'] = True\n\n ## info\n if self.dictObs:\n obs['action'] = action\n obs['reward'] = np.array(reward)\n obs['terminal'] = np.array(False if self.no_terminal else done)\n obs['reset'] = np.array(False)\n obs['scans'] = raw_obs['scans'][0]\n # import ipdb\n # ipdb.set_trace()\n\n self.episode.append(obs.copy())\n if 
done:\n # import ipdb\n # ipdb.set_trace()\n # print(self.episode)\n episode = {k: np.array([t[k] for t in self.episode]) for k in self.episode[0]}\n info['episode'] = episode\n\n return obs, reward, done, info\n\n\ndef test_env(debug=False):\n env_cfg = json.load(open(os.path.join(path_filler('config'), 'rlf110_env_cfg.json')))\n env_cfg['display_lidar'] = True\n env_cfg['obs_shape'] = 1080\n # import ipdb; ipdb.set_trace()\n env = create_f110env(**env_cfg)\n policy = GapFollowPolicy()\n # policy = RandomPolicy(action_space=env.action_space)\n wp_manager = Waypoints_Manager(save_wp=False)\n\n for ep_i in range(5):\n obs = env.reset()\n done = False\n # env.render()\n i = 0\n min_obs = []\n while not done:\n i += 1\n env.render()\n steer = 0\n # speed = np.random.rand()*5\n speed = 1\n # print(speed, steer)\n # action = env.action_space.sample()\n # action = np.array([steer, speed])\n\n ##### use policy ######\n action, metric = policy(obs)\n # target_idx = metric['target_idx']\n # print(action)\n obs, step_reward, done, info = env.step(action)\n # print(obs['reward'])\n ##### random #######\n # obs, step_reward, done, info = env.step(0)\n if env.display_lidar:\n env.lidarManager.update_lidar_windows(wait=1, obs=obs)\n if i % 10 == 0:\n # print(f'step_reward: {step_reward}')\n # print(min(obs['vecobs']))\n # print(env.wpManager.get_lateral_error(obs['raw_obs']))\n # print(step_reward)\n pass\n # if i > 100 and i % 5 == 0:\n # wp_manager.save_wp(obs['raw_obs'])\n # print(done)\n # print(obs)\n\n # time.sleep(1)\n # min_obs.append(min(obs))\n # print(step_reward)\n # if i % 30 == 0:\n # plt.plot(obs)\n # plt.title(f'dimension=54')\n # plt.show()\n print('finish one episode')\n\n\nif __name__ == '__main__':\n # wp_manager = Waypoints_Manager(wp_path='./new_wp.csv', save_wp=False, load_wp=True)\n # wp_manager.draw_wp()\n test_env(debug=True)\n","repo_name":"zzjun725/f1tenth_rl","sub_path":"examples/RL_example/f110_rlenv.py","file_name":"f110_rlenv.py","file_ext":"py","file_size_in_byte":18098,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"13203985534","text":"import sys\r\nimport mido\r\n\r\ndef play_file(nomefile, scelta=False, porta=None):\r\n\tm=mido.MidiFile(nomefile)\r\n\tif m:\r\n\t\tnomi=mido.get_output_names()\r\n\t\tif nomi:\r\n\t\t\tif scelta:\r\n\t\t\t\tprint(\"Midi Ports:\")\r\n\t\t\t\ti = 0\r\n\t\t\t\tfor p in nomi:\r\n\t\t\t\t\tprint(i, \" -- \", p)\r\n\t\t\t\t\ti = i+1\r\n\t\t\t\tpnum = int(input(\"select:\"))\r\n\t\t\telse:\r\n\t\t\t\tif porta != None:\r\n\t\t\t\t\tprint( \"port number: \", porta )\r\n\t\t\t\t\tpnum = porta\r\n\t\t\t\telse:\r\n\t\t\t\t\tpnum = -1\r\n\t\t\t\t\tfor nn in range(len(nomi)):\r\n\t\t\t\t\t\tif \"VirtualMIDI\" in nomi[nn]:\r\n\t\t\t\t\t\t\tpnum = nn\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\tif pnum == -1:\t\t\r\n\t\t\t\t\t\tpnum = 0\r\n\t\t\tprint(\"opening Midi port: \", nomi[pnum])\r\n\t\t\tport=mido.open_output(nomi[pnum])\r\n\t\t\tif port:\r\n\t\t\t\ttry:\r\n\t\t\t\t\t[port.send(x) for x in m.play()]\r\n\t\t\t\texcept KeyboardInterrupt:\r\n\t\t\t\t\tprint(\"stopped\")\r\n\r\nlsys = len(sys.argv)\t\t\t\t\t\r\nif lsys == 2:\r\n\tplay_file(sys.argv[1], True)\r\nelif lsys == 3:\r\n\tif sys.argv[2] in list(\"0123456789\"):\r\n\t\tplay_file(sys.argv[1], scelta=False, porta=int(sys.argv[2]))\r\n\telse:\r\n\t\tplay_file(sys.argv[1], 
scelta=False)\r\n\t\t","repo_name":"remigiococo/midiutils","sub_path":"playmid.py","file_name":"playmid.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"11141129366","text":"# from os import linesep\n\n\n# question 1\n# level 1\n# question:\n# write a program\n# which will find all such numbers which are divisible by 7 but are not a multiple of 5\n# between 2000 and 3200 (both included)\n# the numbers obtained should be printed in a comma separated sequence on a single line\n# hints:\n# consider using the range(#begin,#end) method\n# solution:\nl=[]\nfor i in range(2000, 3201):\n    if (i % 7 == 0) and (i % 5 != 0):  # exclude multiples of 5; the original '== 0' contradicted the problem statement\n        l.append(i)\nprint(','.join(str(n) for n in l))  # comma separated values on a single line\n","repo_name":"Narendra-1997/Python-Basic-Programs","sub_path":"programms_100/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"10171379071","text":"import utils  # assumed local module providing gaussianKernel; replaces the unused scipy.linalg.norm import\nimport numpy as np\n\nclass GP_Regressor:\n\n    def __init__(self,Xtrain,Ytrain,width,noise): # initialize the parameters\n\n        self.Xtrain=Xtrain\n        self.Ytrain=Ytrain\n        self.width=width\n        self.noise=noise\n        self.I=np.identity(len(self.Xtrain)) # identity matrix\n        self.Sigma=utils.gaussianKernel(self.Xtrain,self.Xtrain,self.width)+(noise**2)*self.I\n        self.Sigma_inv=np.linalg.inv(self.Sigma)\n\n\n    def predict(self,Xtest):\n        self.Xtest = Xtest\n        self.I=np.identity(len(self.Xtest)) # identity matrix\n\n        self.Sigma_star=utils.gaussianKernel(self.Xtrain,self.Xtest,self.width)\n        self.Sigma_star_trans=self.Sigma_star.T\n        self.Sigma_starstar=utils.gaussianKernel(self.Xtest,self.Xtest,self.width)+(self.noise**2)*self.I # 'self.noise': the bare 'noise' was a NameError here\n        mean=self.Sigma_star_trans.dot(np.linalg.inv(self.Sigma).dot(self.Ytrain)) # mean\n        C=self.Sigma_starstar-self.Sigma_star_trans.dot(np.linalg.inv(self.Sigma).dot(self.Sigma_star)) # covariance\n        return mean,C\n\n    def loglikelihood(self,Xtest,Ytest):\n        mean,C=self.predict(Xtest)\n        C_inv=np.linalg.inv(C)\n        # the determinant term enters with a plus sign: -0.5*log|C| == +0.5*log|C_inv|\n        logp_yf=-0.5*((Ytest-mean).T).dot(C_inv.dot((Ytest-mean)))+0.5*np.log(np.linalg.det(C_inv))-(len(Xtest)/2)*np.log(2*np.pi)\n        return logp_yf\n","repo_name":"vss245/tu-ml","sub_path":"gaussian_process.py","file_name":"gaussian_process.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"36454244647","text":"import joblib\nfrom gensim.utils import simple_preprocess\n\nclass Neural:\n    def decoder(self, pred):\n        if pred == '__label__0':\n            return 'Справка'\n        if pred == '__label__1':\n            return 'Поиск_курса'\n        if pred == '__label__2':\n            return 'Поиск_по_курсу'\n        if pred == '__label__3':\n            return 'Задолжности'\n        if pred == '__label__4':\n            return 'Новое_за_период'\n\n\n    def predict(self, string, decode):\n\n        string = ' '.join(simple_preprocess(string))\n        filename1 = \"my_model.joblib\" # path to the model\n        filename2 = \"vectorizer.joblib\" # path to the vectorizer\n\n        loaded_model = joblib.load(filename1) # model\n        loaded_vect = joblib.load(filename2) # vectorizer\n\n        pred = loaded_model.predict(loaded_vect.transform([string]))\n\n        if decode: # second argument of the function: returns a human-readable label when True, the raw id when False\n            return self.decoder(pred[0])\n        else:\n            return pred[0][-1]\n","repo_name":"Ikiso/hakaton","sub_path":"chatbot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"35453163927","text":"# Converting one data type into another\n\n# 1. Converting a string into a number\n# int() -> converts a value to the int type\nx = '10'\nprint(type(x)) # check the data type of a variable\ny = '5'\nx = int(x)\ny = int(y)\nz = x + y\nprint(type(z))\n\n# 2. Converting an int into a float\nz = float(z)\nprint(z)\n\n# 3. Converting a value into a string\nz = str(z)\nprint(type(z))\n\na = input(\"a: \") # by default input returns a string\nb = input(\"b: \")\nprint(a+b)","repo_name":"grithaq/Python_ZTH_Trainer","sub_path":"Variable_dan_tipe_data/casting.py","file_name":"casting.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"42405463547","text":"'''\n    https://leetcode.com/problems/maximum-score-words-formed-by-letters/\n\n    1255. Maximum Score Words Formed by Letters\n\n\n    Given a list of words, list of single letters (might be repeating) and score of every character.\n\n    Return the maximum score of any valid set of words formed by using the given letters (words[i] cannot be used two or more times).\n\n    It is not necessary to use all characters in letters and each letter can only be used once. Score of letters 'a', 'b', 'c', ... ,'z'\n    is given by score[0], score[1], ... , score[25] respectively.\n'''\n\n'''\n    Accepted\n'''\n\nclass Solution:\n    def maxScoreWords(self, words: [str], letters: [str], score: [int]) -> int:\n        # returns true if a word set can be formed from the given letters\n        def isValidSet(wordSet, letterCount):\n            for letter in wordSet:\n                if letter not in letterCount or letterCount[letter] < wordSet[letter]:\n                    return False\n\n            return True\n\n        def calculateSetScore(wordSet, letterScore):\n            setScore = 0\n\n            for letter in wordSet:\n                setScore += wordSet[letter] * letterScore[letter]\n\n            return setScore\n\n        '''\n            Start: Preprocessing\n        '''\n        # first, we map each character to its score\n        letterScore = {}\n\n        for i in range(0, len(score)):\n            letterScore[chr(ord('a') + i)] = score[i]\n\n        # second, we map the letters to their count to quickly check if a letter is present and its freq\n        letterCount = {}\n\n        for letter in letters:\n            if letter not in letterCount:\n                letterCount[letter] = 0\n\n            letterCount[letter] += 1\n\n        # third, we transform every word into a map of letter counts\n        # we ignore in the process all the words that can't be formed by our given letters\n        bagOfWords = []\n\n        for word in words:\n            wordLetterCount = {}\n\n            isValidWord = True\n\n            for letter in word:\n                if letter not in letterCount:\n                    isValidWord = False\n                    break\n\n                if letter not in wordLetterCount:\n                    wordLetterCount[letter] = 0\n\n                wordLetterCount[letter] += 1\n\n            if isValidWord:\n                bagOfWords.append(wordLetterCount)\n\n        '''\n            End: Preprocessing\n        '''\n\n        '''\n            Start: Solution\n        '''\n        emptyWordSet = {}\n\n        for letter in letterCount:\n            emptyWordSet[letter] = 0\n\n        # each wordSet is a tuple of letters in the set and the indices of words used\n        wordSets = [(emptyWordSet, [])]\n\n        # to keep track of sets we've seen before\n        seenSets = []\n\n        maxScore = 0\n\n        while len(wordSets) > 0:\n            # print(wordSets)\n            newWordSets = []\n\n            for wordSet in wordSets:\n                for i in range(0, len(bagOfWords)):\n                    # we haven't used this word in this set yet\n                    if i not in 
wordSet[1]:\n # try appending the current word to the set and see if we have a valid set\n for letter in bagOfWords[i]:\n wordSet[0][letter] += bagOfWords[i][letter]\n\n if wordSet[0] not in seenSets:\n seenSets.append(wordSet[0].copy())\n\n if isValidSet(wordSet[0], letterCount):\n # add this set to our new list of sets\n indices = wordSet[1].copy()\n indices.append(i)\n\n newWordSets.append((wordSet[0].copy(), indices))\n\n wordSetScore = calculateSetScore(wordSet[0], letterScore)\n\n maxScore = max(maxScore, wordSetScore)\n\n # reset this wordSet so we can try adding another word to it\n for letter in bagOfWords[i]:\n wordSet[0][letter] -= bagOfWords[i][letter]\n\n wordSets = newWordSets\n\n '''\n End: Solution\n '''\n\n return maxScore\n\n\n# words = [\"add\", \"dda\", \"bb\", \"ba\", \"add\"]\n# letters = [\"a\", \"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\", \"c\", \"c\", \"d\", \"d\", \"d\"]\n# score = [3, 9, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\nwords = [\"daeagfh\",\"acchggghfg\",\"feggd\",\"fhdch\",\"dbgadcchfg\",\"b\",\"db\",\"fgchfe\",\"baaedddc\"]\nletters =[\"a\",\"a\",\"a\",\"a\",\"a\",\"a\",\"a\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"c\",\"c\",\"c\",\"c\",\"c\",\"c\",\"c\",\"c\",\"c\",\"c\",\"c\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"d\",\"e\",\"e\",\"e\",\"e\",\"e\",\"e\",\"e\",\"e\",\"e\",\"e\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"f\",\"g\",\"g\",\"g\",\"g\",\"g\",\"g\",\"g\",\"g\",\"g\",\"g\",\"g\",\"g\",\"h\",\"h\",\"h\",\"h\",\"h\",\"h\",\"h\",\"h\",\"h\",\"h\",\"h\",\"h\",\"h\"]\nscore = [2,1,9,2,10,5,7,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\nprint(Solution().maxScoreWords(words, letters, score))\n","repo_name":"hnc01/online-judge","sub_path":"LeetCode/hard/maximum_score_words_formed_by_letters.py","file_name":"maximum_score_words_formed_by_letters.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70767213705","text":"\nimport numpy as np\nimport random\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras.models import model_from_json\nfrom keras import backend as K\n\nfrom annealers.paired_state_annealer import Annealer\nfrom utils.PER_memory_tree import Memory\n\nclass DQNAgent:\n\n def __init__(self, environment, memory_size=500):\n self.environment = environment\n self.furthest_distance = int(np.amax(self.environment.distance_matrix))\n self.max_node_degree = int(np.max(np.sum(self.environment.adjacency_matrix, axis=1)))\n self.memory_size = memory_size\n\n self.gamma = 0.6\n self.epsilon = 1.0\n self.epsilon_min = 0.001\n self.epsilon_decay = 0.9\n self.learning_rate = 0.001\n\n self.fix_learning_bug = True\n\n self.NN_state_size = self.furthest_distance+1+self.max_node_degree+1\n self.current_model = self.build_model(self.NN_state_size)\n self.target_model = self.build_model(self.NN_state_size)\n\n self.update_target_model()\n\n self.memory_tree = Memory(memory_size)\n self.annealer = Annealer(self, environment)\n\n def build_model(self, furthest_distance):\n \"\"\"\n Build the neural network model for this agent\n \"\"\"\n\n input_size = furthest_distance * 2\n\n model = Sequential()\n model.add(Dense(32, input_dim=input_size, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(32, activation='relu'))\n 
model.add(Dense(1, activation='linear'))\n model.compile(loss='mse',\n optimizer=Adam(lr=self.learning_rate))\n return model\n\n def update_target_model(self):\n \"\"\"\n Copy weights from the current model to the target model\n \"\"\"\n self.target_model.set_weights(self.current_model.get_weights())\n\n def save_model(self, model_name=None):\n # Serialize model to JSON\n model_json = self.current_model.to_json()\n\n if model_name is not None:\n filepath = \"./models/\" + model_name\n else:\n filepath = \"./models/agent_model\"\n\n with open(filepath + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n\n # Serialize weights to HDF5\n self.current_model.save_weights(filepath + \".h5\")\n print(\"Saved model to disk\")\n\n def load_model(self, model_name=None):\n self.epsilon = self.epsilon_min\n\n if model_name is not None:\n filepath = \"./models/\" + model_name\n else:\n filepath = \"./models/agent_model\"\n\n # Load json and create model\n json_file = open(filepath + '.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n self.current_model = model_from_json(loaded_model_json)\n\n # Load weights into new model\n self.current_model.load_weights(filepath + \".h5\")\n self.current_model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n self.update_target_model()\n print(\"Loaded model from disk\")\n\n def remember(self, state, reward, next_state, done):\n \"\"\"\n Store experience in the memory tree\n \"\"\"\n self.memory_tree.store((state, reward, next_state, done))\n\n def generate_random_action(self, protected_nodes):\n \"\"\"\n Generates a random layer of swaps\n Care is taken to ensure that all swaps can occur in parallel\n That is, no two neighbouring edges undergo a swap simultaneously\n \"\"\"\n\n action = np.array([0] * len(self.environment.edge_list)) # an action representing an empty layer of swaps\n\n edges = [(n1,n2) for (n1,n2) in self.environment.edge_list]\n\n if not self.fix_learning_bug:\n edges = list(filter(lambda e: e[0] not in protected_nodes and e[1] not in protected_nodes, edges))\n\n edge_index_map = {edge: index for index,edge in enumerate(edges)}\n\n if self.fix_learning_bug:\n edges = list(filter(lambda e: e[0] not in protected_nodes and e[1] not in protected_nodes, edges))\n\n while len(edges) > 0:\n edge = random.sample(edges, 1)[0]\n action[edge_index_map[edge]] = 1\n\n # This also removes the sampled edge\n edges = [e for e in edges if e[0] not in edge and e[1] not in edge]\n\n return action\n\n def obtain_distance_vector(self, current_state):\n \"\"\"\n Obtains a vector that summarises the different distances\n from qubits to their targets.\n\n More precisely, x_i represents the number of qubits that are\n currently a distance of i away from their targets.\n\n If there are n qubits, then the length of this vector\n will also be n.\n \"\"\"\n\n qubit_locations, qubit_targets, _, protected_nodes = current_state\n\n nodes_to_target_qubits = \\\n [qubit_targets[qubit_locations[n]] for n in range(0,len(qubit_locations))]\n\n nodes_to_target_nodes = [next(iter(np.where(np.array(qubit_locations) == q)[0]), -1) \\\n for q in nodes_to_target_qubits]\n\n distance_vector = [0 for _ in range(self.furthest_distance+1)]\n\n for n in range(len(nodes_to_target_nodes)):\n target = nodes_to_target_nodes[n]\n\n if target == -1:\n continue\n\n d = int(self.environment.distance_matrix[n][target])\n\n if d > 1 or nodes_to_target_nodes[target] != n:\n distance_vector[d] += 1\n else:\n distance_vector[d-1] += 1\n\n\n best_swaps_vector = [0 
for _ in range(self.max_node_degree+1)]\n\n for node, target in enumerate(nodes_to_target_nodes):\n if target == -1:\n continue\n\n dist = self.environment.distance_matrix[node][target]\n\n if dist == 1:\n continue\n\n neighbours = np.where(self.environment.adjacency_matrix[node] == 1)[0]\n candidate_neighbours = []\n\n for neighbour in neighbours:\n if self.environment.distance_matrix[neighbour][target] == dist-1 \\\n and neighbour not in protected_nodes:\n candidate_neighbours.append(neighbour)\n\n # print('Node ' + str(node) + ' with target ' + str(target) + ' has candidate neighbours: ' + str(candidate_neighbours))\n\n best_swaps_vector[len(candidate_neighbours)] += 1\n\n\n return distance_vector + best_swaps_vector\n\n def get_NN_input(self, current_state, next_state):\n current_state_distance_vector = self.obtain_distance_vector(current_state)\n next_state_distance_vector = self.obtain_distance_vector(next_state)\n\n return np.reshape(np.array(current_state_distance_vector + next_state_distance_vector), \\\n (1,len(current_state_distance_vector)*2))\n\n def get_quality(self, current_state, next_state, action_chooser='model'):\n neural_net_input = self.get_NN_input(current_state, next_state)\n\n if action_chooser == 'model':\n Qval = self.current_model.predict(neural_net_input)[0]\n elif action_chooser == 'target':\n Qval = self.target_model.predict(neural_net_input)[0]\n\n return Qval\n\n def act(self, current_state):\n \"\"\"\n Chooses an action to perform in the environment and returns it\n (i.e. does not alter environment state)\n \"\"\"\n\n protected_nodes = current_state[3]\n\n if np.random.rand() <= self.epsilon:\n action = self.generate_random_action(protected_nodes)\n return action, \"Random\"\n\n # Choose an action using the agent's current neural network\n action, _ = self.annealer.simulated_annealing(current_state, action_chooser='model')\n return action, \"Model\"\n\n def replay(self, batch_size, print_experiences=False):\n \"\"\"\n Learns from past experiences\n \"\"\"\n\n tree_index, minibatch, ISweights = self.memory_tree.sample(batch_size)\n minibatch_with_weights = zip(minibatch,ISweights)\n absolute_errors = []\n\n for experience, ISweight in minibatch_with_weights:\n [state, reward, next_state, done] = experience[0]\n\n NN_input = self.get_NN_input(state, next_state)\n Qval = self.get_quality(state, next_state)\n\n if done:\n target = reward\n else:\n _, energy = self.annealer.simulated_annealing(next_state, action_chooser='target', search_limit=10)\n bonus = -energy\n target = reward + self.gamma * bonus\n\n absolute_error = abs(Qval - target)\n absolute_errors.append(absolute_error)\n\n target_exp = np.sum(NN_input[:,:self.furthest_distance]) == 2\n\n if print_experiences and target_exp:\n print()\n print(np.reshape(self.obtain_targets(state), (self.environment.rows, self.environment.cols)))\n print()\n print(np.reshape(self.obtain_targets(next_state), (self.environment.rows, self.environment.cols)))\n print()\n print('Rep:', NN_input)\n print()\n print('Prediction:', Qval)\n print('Reward:', reward)\n print('Bonus:', target - reward)\n print('Total:', target)\n print('------')\n print()\n\n self.current_model.fit(NN_input, [target], epochs=1, verbose=0, sample_weight=ISweight)\n\n self.memory_tree.batch_update(tree_index, absolute_errors)\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n def obtain_targets(self, current_state):\n \"\"\"\n Obtains a list that maps nodes to their targets\n More precisely, a node n1 targets another 
node n2\n iff n1 holds q1 and n2 holds q2 and q1 targets q2\n \"\"\"\n\n qubit_locations = np.array(current_state[0])\n qubit_targets = current_state[1]\n\n nodes_to_target_qubits = \\\n [qubit_targets[qubit_locations[n]] for n in range(0,len(qubit_locations))]\n\n return np.reshape(np.array(nodes_to_target_qubits), (1,len(qubit_locations)))\n\n def used_up_memory_capacity(self):\n return self.memory_tree.tree.used_up_capacity\n","repo_name":"Macro206/qubit-routing-with-rl","sub_path":"agents/paired_state_agent.py","file_name":"paired_state_agent.py","file_ext":"py","file_size_in_byte":10372,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"81"} +{"seq_id":"17539714004","text":"if __name__ == '__main__':\n n = int(input())\n arr = map(int, input().split())\n\n neslist = list(set(arr))\n neslist.sort()\n\n print(neslist[-2])\n\n # newlist = [x for x in arr if x < max(arr)]\n\n # print(newlist)","repo_name":"Anib999/hackerrankques","sub_path":"q9ans.py","file_name":"q9ans.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9897152486","text":"from odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\nfrom .constants_user_groups import portal_user\nfrom .constants_trang_thai_sinh_vien import trang_thai_sv_dang_hoc\nfrom .constants_of_selection_fields import trang_thai_sinh_vien\nimport re\n\n\nclass SinhVien(models.Model):\n _name = \"sinh_vien\"\n _description = \"Sinh viên\"\n _inherit = [\"tac_nhan\"]\n _rec_name = \"ma_dinh_danh\"\n\n user_group_string = portal_user\n vai_tro_string = \"sinh_vien\"\n # giá trị này lấy trong base_groups.xml tương ứng với loại user\n\n # giới tính kế thừa từ trường gioi_tinh của model res_partner\n #\n ghi_chu = fields.Text(\"Ghi chú\") # ghi chu tt1\n # Hồ sơ sinh viên Edusoft\n # MaSV = fields.Char('Mã sinh viên', size=50, required=True)\n\n AvatarSV = fields.Image(\"Avatar sinh viên\")\n # MaDT = fields.Char('Mã dân tộc', size=50\n MaDT = fields.Selection(\n [\n (\"1\", \"Kinh\"),\n (\"2\", \"Tày\"),\n (\"3\", \"Thái\"),\n (\"4\", \"Hoa\"),\n (\"5\", \"Khmer\"),\n (\"6\", \"Mường\"),\n (\"7\", \"Nùng\"),\n (\"8\", \"HMông\"),\n (\"9\", \"Dao\"),\n (\"10\", \"Gia-rai\"),\n (\"11\", \"Ngái\"),\n ],\n string=\"Dân tộc\",\n )\n # fields.Selection([('0', 'Nội chú'),\n # ('1', 'Thường chú'),\n # ('2', 'Tạm chú')], 'Loại cư trú')\n MaTG = fields.Char(\"Mã tôn giáo\", size=50)\n MaNg = fields.Many2one(comodel_name=\"quan_ly_nganh_hoc.nganh\",\n ondelete=\"set null\",\n string=\"Ngành\")\n MaChngNg = fields.Many2one(\n comodel_name=\"quan_ly_nganh_hoc.chuyen_nganh\",\n ondelete=\"set null\",\n string=\"Chuyên ngành\",\n )\n TheThuVien = fields.Char(\"Thẻ thư viện\", size=500)\n SoTaiKhoan = fields.Char(\"Số tài khoản ngân hàng\", size=500)\n MaNH = fields.Char(\"Mã ngân hàng\", size=50)\n CHINHANHNH = fields.Char(\"Chi nhánh ngân hàng\", size=500)\n SoNamBD = fields.Integer(\"Số năm bộ đội\")\n SoNamTNXP = fields.Integer(\"Số năm thanh niên xung phong\")\n CanNang = fields.Float(\"Cân nặng\")\n ChieuCao = fields.Float(\"Chiều cao\")\n MADBQUOCTICH = fields.Char(\"Mã DB quốc tịch\", size=500) # Quốc tịch\n CoNghe = fields.Integer(\"Có nghề?\")\n CQCT = fields.Char(\"Tên cơ quan công tác\", size=500)\n CVCQCT = fields.Char(\"Chức vụ cơ quan công tác\", size=500)\n DangHoc = fields.Integer(\"Đang học\")\n DC_DCHK = fields.Char(\"Địa chỉ hộ khẩu\", size=500) # hộ khẩu\n DC_DCHK2 = 
fields.Char(\"Địa chỉ hộ khẩu 2\", size=500)\n DC_DCLLSV = fields.Char(\"Địa chỉ liên lạc\", size=500)\n DC_DCTT = fields.Char(\"Địa chỉ thường trú\", size=500) # thường trú\n DC_DCVC = fields.Char(\"Đia chỉ vợ/chồng\", size=500)\n DC_DT1HK = fields.Char(\"Tel hộ khẩu\", size=500)\n DC_DT1LLSV = fields.Char(\"Điện thoại liên lạc\", size=500) # Số điện thoại\n DC_DT1TT = fields.Char(\"Tel thường chú\", size=500)\n DC_DT1VC = fields.Char(\"Tel vợ/chồng\", size=500)\n DC_DT2HK = fields.Char(\"Tel hộ khẩu 2\", size=500)\n DC_DT2LLSV = fields.Char(\"Điện thoại liên lạc 2\", size=500)\n DC_DT2TT = fields.Char(\"Tel thường chú 2\", size=500)\n DC_DT2VC = fields.Char(\"Tel vợ/chồng 2\", size=500)\n DC_Eml1HK = fields.Char(\"Email hộ khẩu\", size=500)\n DC_EML1LLSV = fields.Char(\"Email liên lạc\", size=500)\n DC_Eml1TT = fields.Char(\"Email thường chú\", size=500)\n DC_Eml1VC = fields.Char(\"Email vợ/chồng\", size=500)\n DC_Eml2HK = fields.Char(\"Email hộ khẩu 2\", size=500)\n DC_EML2LL = fields.Char(\"Email liên lạc 2\", size=500) # Trùng dc_eml2llsv?\n DC_Eml2TT = fields.Char(\"Email thường chú 2\", size=500)\n DC_Eml2VC = fields.Char(\"Email vợ/chồng 2\", size=500)\n DC_GcHK = fields.Char(\"Ghi chú địa chỉ hộ khẩu\", size=500)\n DC_GcLL = fields.Char(\"Ghi chú địa chỉ liên lạc\", size=500)\n DC_GcTT = fields.Char(\"Ghi chú địa chỉ thường chú\", size=500)\n DC_GcVC = fields.Char(\"Ghi chú địa chỉ vợ chồng\", size=500)\n MADBHKSV = fields.Char(\"Mã DB HK SV\", size=500)\n MADBLLSV = fields.Char(\"Mã DB LL SV\", size=500)\n MADBTTSV = fields.Char(\"Mã DB TT SV\", size=500)\n MADBVCSV = fields.Char(\"Mã DB VC SV\", size=500)\n DCCQCT = fields.Char(\"Địa chỉ cơ quan công tác\", size=500)\n DKKinhTe = fields.Selection([(\"0\", \"Có khả năng\"), (\"1\", \"Cố gắng\"),\n (\"2\", \"Khó khăn\")], \"Điều kiện kinh tế\")\n GcSV = fields.Char(\"Ghi chú sinh viên\", size=4000)\n GcThPhanGD = fields.Char(\"Ghi chú thành phần gia đình\", size=4000)\n MADBCQCTSV = fields.Char(\"Mã ĐB CQ CT\", size=500)\n LoaiCuTru = fields.Selection([(\"0\", \"Nội chú\"), (\"1\", \"Thường chú\"),\n (\"2\", \"Tạm chú\")], \"Loại cư trú\")\n MatCha = fields.Integer(\"Cha đã mất?\")\n MatMe = fields.Integer(\"Mẹ đã mất?\")\n NgayNgTru = fields.Datetime(\"Ngày ngoại trú\")\n NgNghiepVC = fields.Char(\"Nghề nghiệp vợ/chồng\", size=500)\n NgSinhCha = fields.Char(\"Ngày sinh cha\", size=500)\n NgSinhMe = fields.Char(\"Ngày sinh mẹ\", size=500)\n NguoiLL = fields.Char(\"Người liên lạc\", size=500)\n NguonTuyen = fields.Selection(\n [\n (\"0\", \"Trúng tuyển\"),\n (\"1\", \"Chuyển trường\"),\n (\"2\", \"Cử tuyển\"),\n (\"3\", \"Xét tuyển\"),\n (\"4\", \"Tuyển thẳng\"),\n (\"5\", \"Nguồn khác\"),\n ],\n \"Nguồn tuyển\",\n )\n NNgCha = fields.Char(\"Nghề nghiệp cha\", size=500)\n NNgMe = fields.Char(\"Nghề nghiệp mẹ\", size=500)\n NoiCapCMND = fields.Char(\"Nơi cấp CMND\", size=500)\n PhongKTX = fields.Char(\"Phòng kí túc xá\", size=500)\n QueQuan = fields.Char(\"Quê quán\", size=500)\n SoAnhChiEm = fields.Integer(\"Số anh chị em\")\n TenCha = fields.Char(\"Tên cha\", size=500)\n TenChuHo = fields.Char(\"Tên chủ hộ\", size=500)\n TenChuHoHK = fields.Char(\"Tên chủ hộ (HK)\", size=500)\n TenMe = fields.Char(\"Tên mẹ\", size=500)\n TenVC = fields.Char(\"Tên vợ/chồng\", size=500)\n ThPhanGD = fields.Selection(\n [(\"0\", \"Công nhân viên chức\"), (\"1\", \"Nhà nước\"), (\"2\", \"Khác\")],\n \"Thành phần gia đình\",\n )\n loaiNoiSinh = fields.Selection([\n (\"0\", \"Trong nước\"),\n (\"1\", \"Nước ngoài\")\n ], string=\"Loại nơi sinh\")\n 
noiSinhNuocNgoai = fields.Char(\"Nơi sinh nước ngoài\")\n\n TrChuyen = fields.Char(\"Trường chuyển\", size=500)\n # NgCapCMND = fields.Char('Ngày cấp CMND', size=500)\n NgCapCMND = fields.Date(\"Ngày cấp CMND\")\n MADBNOISINHSV = fields.Char(\"Mã ĐB nơi sinh\", size=500) # Nơi sinh\n DC_EML2LLSV = fields.Char(\"Email 2 liên lạc sinh viên\", size=500)\n DC_DT1Me = fields.Char(\"Điện thoại liên lạc mẹ\", size=500)\n DC_DT1Cha = fields.Char(\"Điện thoại liên lạc cha\", size=500)\n GhiChuView1 = fields.Char(\"Ghi chú dùng bên detailright\", size=500)\n TenHo = fields.Char(\"Tên hộ\", size=500)\n DC_DTNgLL = fields.Char(\"Điện thoại người liên lạc\", size=500)\n DC_EmlNgLL = fields.Char(\"Email người liên lạc\", size=500)\n SoBD = fields.Char(\"Số báo danh\", size=500)\n Diem1 = fields.Float(\"Điểm 1\")\n Diem2 = fields.Float(\"Điểm 2\")\n Diem3 = fields.Float(\"Điểm 3\")\n Diem4 = fields.Float(\"Điểm 4\")\n DiemUT = fields.Float(\"Điểm ưu tiên\")\n Diem5 = fields.Float(\"Điểm 5\")\n DiemTong = fields.Float(\"Điểm tổng\")\n # NganhThi = fields.Char('Ngành thi', size=500)\n NganhThi = fields.Many2one(comodel_name=\"quan_ly_nganh_hoc.nganh\",\n ondelete=\"set null\",\n string=\"Ngành thi\")\n KhoiThi = fields.Char(\"Khối thi\", size=500)\n NhomKVTS = fields.Selection(\n [\n (\"Khu vực 1\", \"1\"),\n (\"Khu vực 2 nông thôn\", \"2NT\"),\n (\"Khu vực 2\", \"2\"),\n (\"Khu vực 3\", \"3\"),\n ],\n \"Nhóm khu vực\",\n )\n DOITUONGTS = fields.Selection(\n [\n (\"0\", \"Không có đối tượng\"),\n (\"1\", \"Đối tượng 01\"),\n (\"2\", \"Đối tượng 02\"),\n (\"3\", \"Đối tượng 03\"),\n (\"4\", \"Đối tượng 04\"),\n (\"5\", \"Đối tượng 05\"),\n (\"6\", \"Đối tượng 06\"),\n (\"7\", \"Đối tượng 07\"),\n (\"8\", \"Nhóm đối tượng 3\"),\n ],\n \"Đối tượng tuyển sinh\",\n )\n NAMTN = fields.Integer(\"Năm tốt nghiệp PTTH\")\n MaSoThue = fields.Char(\"Mã số thuế\", size=500)\n TenDVThue = fields.Char(\"Tên đơn vị thuế\", size=500)\n TTSV = fields.Integer(\"Thứ tự sinh viên\")\n TGianCT = fields.Char(\"Thời gian thâm niên công tác\", size=500)\n DotTS = fields.Char(\"Đợt tuyển sinh\", size=500)\n DIEMCHUAN = fields.Float(\"Điểm chuẩn theo tổ hợp xét tuyển\")\n HocLuc = fields.Selection([(\"0\", \"Giỏi\"), (\"1\", \"Khá\"),\n (\"2\", \"Trung bình\"), (\"3\", \"Yếu\")], \"Học lực\")\n HanhKiem = fields.Selection([(\"0\", \"Tốt\"), (\"1\", \"Khá\"),\n (\"2\", \"Trung bình\"), (\"3\", \"Yếu\")],\n \"Hạnh kiểm\")\n SoBHSV = fields.Char(\"Số bảo hiểm sinh viên\", size=50) # Bảo hiểm y tế\n MaBVKCB = fields.Char(\"Mã bệnh viện KCB\", size=50)\n NgSinhGH = fields.Char(\"Ngày sinh giám hộ\", size=500)\n TenGiamHo = fields.Char(\"Tên giám hộ\", size=500)\n LoaiKhuyetTat = fields.Selection(\n [\n (\"1\", \"Khuyết tật vận động\"),\n (\"2\", \"Khuyết tật nghe nói\"),\n (\"3\", \"Khuyết tật nhìn\"),\n (\"4\", \"Khuyết tật thần kinh tâm thần\"),\n (\"5\", \"Khuyết tật trí tuệ\"),\n (\"6\", \"Khuyết tật khác\"),\n ],\n \"Loại khuyết tật\",\n )\n NNgGH = fields.Char(\"Nghề nghiệp giám hộ\", size=500)\n NgayNHOC = fields.Date(\"Ngày nhập học\")\n DVDKDTHI = fields.Char(\"Đơn vị đăng ký dự thi\", size=500)\n\n lop_hanh_chinh_ids = fields.Many2many(\"lop_hanh_chinh\",\n string=\"Danh sách lớp hành chính\")\n lop_hanh_chinh_id = fields.Many2one(\n comodel_name=\"lop_hanh_chinh\",\n compute=\"_compute_lop_hanh_chinh_hien_tai\",\n string=\"Lớp hành chính hiện tại\",\n store=True)\n sv_ltc_ds_ids = fields.One2many(\"sv_ltc_ds\",\n \"sinh_vien_id\",\n string=\"Sinh viên-lớp tín chỉ-điểm số\")\n sv_hp_ds_ids = fields.One2many(\n 
comodel_name=\"sv_hp_ds\",\n inverse_name=\"sinh_vien_id\",\n string=\"Kết quả học tập theo học phần\",\n )\n lop_tin_chi_ids = fields.Many2many(comodel_name=\"lop_tin_chi\",\n string=\"Danh sách lớp tín chỉ\")\n nhom_lop_tin_chi_ids = fields.Many2many(\n comodel_name=\"nhom_lop_tin_chi\", string=\"Danh sách nhóm lớp tín chỉ\")\n\n @api.depends(\"sv_ltc_ds_ids\")\n def _compute_danh_sach_lop_tin_chi(self):\n for record in self:\n if record.sv_ltc_ds_ids is not False:\n record.lop_tin_chi_ids = record.sv_ltc_ds_ids.mapped(\n \"lop_tin_chi_id\")\n else:\n record.lop_tin_chi_ids = False\n\n dot_nhap_hoc_id = fields.Many2one(\"dot_nhap_hoc\", string=\"Đợt nhập học\")\n khoa_nganh_id = fields.Many2one(\n comodel_name=\"khoa_nganh\",\n # related=\"lop_hanh_chinh_id.khoi_lop_id.khoa_nganh_id\",\n store=True,\n string=\"Khóa ngành\",\n )\n khoa_chuyen_nganh_id = fields.Many2one(\n comodel_name=\"khoa_chuyen_nganh\",\n related=\"lop_hanh_chinh_id.khoa_chuyen_nganh_id\",\n store=True,\n string=\"Khóa chuyên ngành\",\n )\n # khoa_nganh_id_import = fields.Many2one(\n # comodel_name=\"khoa_nganh\",\n # string=\"Khóa ngành (Import)\",\n # )\n nganh_id = fields.Many2one(comodel_name=\"quan_ly_nganh_hoc.nganh\",\n related=\"lop_hanh_chinh_id.nganh\",\n store=True,\n name=\"Ngành học\")\n ten_nganh = fields.Char(string=\"Tên ngành\",\n related=\"nganh_id.name.ten_nganh_hoc\",\n store=True)\n ma_nganh = fields.Char(string=\"Mã ngành\",\n related=\"nganh_id.name.ma_nganh_hoc\",\n store=True)\n hinh_thuc_dao_tao_id = fields.Many2one(comodel_name='hinh_thuc_dao_tao',\n string='Hình thức đào tạo')\n trinh_hinh_id = fields.Many2one(comodel_name=\"danh_muc.trinh_do_hinh_dao_tao\",\n string=\"Trình độ, hình thức đào tạo\")\n ten_don_vi = fields.Char(string=\"Tên đơn vị\",\n related=\"don_vi_id.ten_don_vi\",\n store=True)\n ma_don_vi = fields.Char(string=\"Mã đơn vị\",\n related=\"don_vi_id.ma_don_vi\",\n store=True)\n khoi_lop_id = fields.Many2one(comodel_name=\"khoi_lop\",\n related=\"lop_hanh_chinh_id.khoi_lop_id\",\n store=True,\n string=\"Khối lớp\")\n khoa_sinh_vien_id = fields.Many2one(\n \"khoa_sinh_vien\",\n string=\"Khóa sinh viên\",\n related=\"lop_hanh_chinh_id.khoa_sinh_vien\",\n store=True,\n )\n ctk_nganh_id = fields.Many2one(\n comodel_name=\"chuong_trinh_khung\",\n related=\"lop_hanh_chinh_id.chuong_trinh_khung_nganh_id\",\n # compute=\"_compute_chuong_trinh_khung\",\n store=True,\n string=\"Chương trình khung ngành\",\n )\n ctk_chuyen_nganh_id = fields.Many2one(\n comodel_name=\"chuong_trinh_khung\",\n related=\"lop_hanh_chinh_id.chuong_trinh_khung_chuyen_nganh_id\",\n store=True,\n string=\"Chương trình khung chuyên ngành\",\n )\n cong_no_id = fields.One2many(comodel_name=\"qldt.cong_no\",\n inverse_name=\"sinh_vien_id\",\n string=\"Công nợ\")\n diem_tich_luy = fields.Float(\n compute=\"_compute_diem_tich_luy\",\n store=True,\n string=\"Điểm TL (thang 10)\",\n )\n diem_tich_luy_thang_4 = fields.Float(\n compute=\"_compute_diem_tich_luy_thang_4\",\n store=True,\n string=\"Điểm TL (thang 4)\",\n )\n trang_thai = fields.Char(string=\"Trạng thái\", default=\"Đang học\")\n trang_thai_sinh_vien = fields.Selection(selection=trang_thai_sinh_vien,\n default=\"Đang học\",\n string=\"Trạng thái sinh viên\")\n\n @api.model\n def create(self, values):\n res = super(SinhVien, self).create(values)\n res.partner_id.sinh_vien_id = res.id\n return res\n\n def _create_user(self):\n user_group = self.env.ref(self.user_group_string) or False\n users_res = self.env['res.users']\n for record in self:\n if not 
record.user_id:\n                if record.ngay_sinh:\n                    password = record.ngay_sinh.strftime(\"%d%m%Y\")\n                else:\n                    password = \"ptitdu\"\n                login = str(record.ma_dinh_danh)\n                match = re.match(\n                    \"^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$\",\n                    login,\n                )\n                if match is None:\n                    login = login.upper()\n                user_id = users_res.create({\n                    # 'name':\n                    # record.name,\n                    'partner_id': record.partner_id.id,\n                    'login': login,\n                    'password': password,\n                    'groups_id': user_group,\n                    # 'tz':\n                    # self._context.get('tz'),\n                    'vai_tro': self.vai_tro_string,\n                })\n                record.user_id = user_id\n\n    @api.depends('lop_hanh_chinh_ids')\n    def _compute_lop_hanh_chinh_hien_tai(self):\n        for record in self:\n            if record.lop_hanh_chinh_ids:\n                record.lop_hanh_chinh_id = record.lop_hanh_chinh_ids[-1]\n            else:\n                record.lop_hanh_chinh_id = False\n\n    @api.depends(\"sv_hp_ds_ids\")\n    def _compute_diem_tich_luy(self):\n        \"\"\"\n        TODO:\n        1. Need to rename trang_thai -> trang_thai_sinh_vien\n        \"\"\"\n        for record in self:\n            if record.sv_hp_ds_ids:\n                if record.trang_thai == trang_thai_sv_dang_hoc:\n                    tong_so_tin_chi = 0\n                    tong_diem = 0\n                    for ban_ghi in record.sv_hp_ds_ids:\n                        if ban_ghi.diem_hoc_phan != 0:\n                            tong_diem += ban_ghi.diem_hoc_phan * ban_ghi.so_tin_chi\n                            tong_so_tin_chi += ban_ghi.so_tin_chi\n                        else:\n                            if ban_ghi.ghi_chu and ban_ghi.ghi_chu != \"\":\n                                continue\n                    if tong_so_tin_chi != 0:\n                        record.diem_tich_luy = tong_diem / tong_so_tin_chi\n                    else:\n                        record.diem_tich_luy = 0\n            else:\n                record.diem_tich_luy = 0  # was a bare 'record' expression, a no-op that left the computed field unset\n\n    @api.depends(\"diem_tich_luy\")\n    def _compute_diem_tich_luy_thang_4(self):\n        \"\"\"\n        TODO:\n        1. Need to rename trang_thai -> trang_thai_sinh_vien\n        \"\"\"\n        for record in self:\n            if record.diem_tich_luy:\n                # rough interim conversion; this formula is wrong: the correct one averages every course grade on the 4-point scale weighted by credits over the total credits\n                if record.trang_thai == trang_thai_sv_dang_hoc:\n                    record.diem_tich_luy_thang_4 = record.diem_tich_luy * 0.4\n            else:\n                record.diem_tich_luy_thang_4 = 0","repo_name":"nminhquang380/odoo","sub_path":"models/sinh_vien.py","file_name":"sinh_vien.py","file_ext":"py","file_size_in_byte":18451,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"32352465402","text":"# Save in the second array the indices of the even elements of the first array. For example, if\n# the array holds the values 8, 3, 15, 6, 4, 2, then the second array must be filled with the values 1, 4,\n# 5, 6 (or 0, 3, 4, 5 if indexing starts from zero), since those are exactly the positions\n# of the first array that contain even numbers.\n\nfirst_array = [8, 3, 15, 6, 4, 2]\nsecond_array = []\n\nfor key, value in enumerate(first_array):\n    if value % 2 == 0:\n        second_array.append(key)\n\nprint(f'{first_array}\\n{second_array}')\n","repo_name":"grgblb/GB-Python-Algorithms","sub_path":"lesson_3/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8861397406","text":"import cv2\nimport HandDetection as hd\nimport numpy as np\nimport time\nimport math\nfrom ctypes import cast, POINTER\nfrom comtypes import CLSCTX_ALL\nfrom pycaw.pycaw import AudioUtilities, IAudioEndpointVolume\n\n\ncap = cv2.VideoCapture(0)\nptime = 0\ndetecter = hd.handDetector(detectionCon=0.7)\n\ndevices = AudioUtilities.GetSpeakers()\ninterface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)\nvolume = cast(interface, POINTER(IAudioEndpointVolume))\n# volume.GetMute()\n# volume.GetMasterVolumeLevel()\nvolume_range = volume.GetVolumeRange()\n# (-65.25, 0.0, 0.03125)\n# minVol = -65.25. ie; volume[0]\n# maxVol = 0.0. ie; volume[1]\n\nminVol = volume_range[0]\nmaxVol = volume_range[1]\n\n# volume.SetMasterVolumeLevel(0.0, None)\n\n\n# FPS -> frames per second\nwhile True:\n    ret, frame = cap.read()\n\n    detecter.findHands(frame)\n    poshand=detecter.findPosition(frame, draw=False)\n    if len(poshand) > 0:\n        # print(poshand[8], poshand[4])\n        x1, y1 = poshand[8][1], poshand[8][2]\n        x2, y2 = poshand[4][1], poshand[4][2]\n\n        mx, my = (x1+x2)//2, (y1+y2)//2\n\n        cv2.circle(frame,(x1,y1),15,(255,0,0),cv2.FILLED)\n        cv2.circle(frame,(x2,y2),15,(255,0,0),cv2.FILLED)\n        cv2.line(frame,(x1,y1),(x2,y2),(0,0,0),3)\n        cv2.circle(frame,(mx,my),15,(110,230,88),cv2.FILLED)\n        length = math.hypot(x2-x1,y2-y1)\n        # print(length)\n        # hand range 190 to 30\n        # vol range -65 to 0\n\n        vol = np.interp(length, [30, 190], [minVol, maxVol])\n\n        # print(vol)\n        volume.SetMasterVolumeLevel(vol, None)\n\n        if length <= 30:\n            cv2.circle(frame, (mx, my), 15, (0, 0, 255), cv2.FILLED)\n\n\n    # fps code\n    ctime = time.time()\n    fps = 1 / (ctime - ptime)\n    ptime = ctime\n\n    cv2.putText(frame, \"FPS : \"+str(int(fps)), (10,50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)\n\n    cv2.imshow(\"window\", frame)\n    key = cv2.waitKey(1)  # read the key code once per frame; two separate waitKey calls could drop a press\n\n    if key == ord('x'):\n        break\n    if key == ord('d'):\n        cv2.imwrite(\"dance`1````````````````````````````````````````7.png\",frame)\ncap.release()\ncv2.destroyAllWindows()\n\nprint(\"Thank You !!\")\n","repo_name":"DIVYADHARSHINI19/Hand-Detector","sub_path":"video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"11842939601","text":"import start_app\nfrom features.helpers.appium_helpers import tap_image, pinch_or_zoom\nfrom features.helpers.coc_helpers import collect_gold\nfrom features.helpers.common_helpers import get_config_and_set_logging\n\n# Read Config file and set logging\nCONFIG, LOGGER = get_config_and_set_logging(\"config.yaml\", \"app_logs.log\", \"INFO\", __name__)\nQUERY_IMAGES_FLDR = CONFIG['Paths']['QueryImages']\nVISUALIZATIONS_FLDR = CONFIG['Paths']['Visualizations']\n\n\ndef 
collect_gold_and_elixir(driver):\n \"\"\"\n Collect Gold and Elixir\n :param driver: Appium driver\n :return: NA\n \"\"\"\n LOGGER.info(\"Pinch screen to see the whole village\")\n pinch_or_zoom(driver, 'pinch')\n\n LOGGER.info(\"Collect Gold\")\n collect_gold(driver)\n\n LOGGER.info(\"Collect Elixir\")\n tap_image(driver, \"elixir_collection_red_button.png\")\n\n\nif __name__ == '__main__':\n LOGGER.info(\"Collect Gold and Elixir\")\n","repo_name":"Navdit/automate_coc","sub_path":"features/collect_resources.py","file_name":"collect_resources.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7246138654","text":"import pandas as pd\ndata=pd.read_csv(\"/home/admin123/Documents/NUMPY/j.csv\",header=0)#header is for starting\n# print(data.head())#By default it took 5 lines\n# print(data.head(7))#It take 7 lines\n# print(data.describe())\n# print(data.iloc[3])\n# print(data.iloc[0:6])\n\n\ndata1=pd.read_csv(\"/home/admin123/Documents/NUMPY/j.csv\",header=0,index_col=0)#header is starting\nprint(data1.head())\nprint(data1.loc[\"kirti 11 45\"])\n\n","repo_name":"Subhkirti/Python_library","sub_path":"pand.py","file_name":"pand.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72176096585","text":"from fuzzylev import *\nfrom pybliometrics.scopus import ScopusSearch, AuthorSearch, AuthorRetrieval\n\ndef dattr(obj, attrs = 'parameters'):\n '''function showing list of object's attributes and their values as pandas dataframe'''\n import pandas as pd\n # dataframe of attributes not starting with _\n dobj = pd.DataFrame([attr for attr in dir(obj) if not attr.startswith('_')], columns=['attribute'])\n # values of attributes\n dobj['value'] = dobj['attribute'].apply(lambda x: getattr(obj, x))\n # set column attribute as an index\n dobj.set_index('attribute', inplace=True)\n # dictionary for output accordung to attributes types\n case = {'all':dobj,\n 'numerical':dobj[pd.to_numeric(dobj.value, errors='coerce').notnull()],\n 'parameters': dobj[~dobj.value.astype(str).str.startswith('<')],\n 'methods':dobj[dobj.value.astype(str).str.startswith('<')]}\n return case.get(attrs, 'invalid input')\n\ndef ScopusDocQuery(title, authorlastname=''):\n '''Scopus query for searching title and/or author's last name'''\n texttitle = f'TITLE(\"{title}\")'\n if authorlastname!='':\n authorlastname = clean_str(authorlastname, case=\"lower\")\n authorlastnamenounicode = clean_str(authorlastname, case=\"lower\", unicode=False)\n textauthor = f' AND AUTHLASTNAME({authorlastnamenounicode})'# OR AUTHLASTNAME({authorlastnamenounicode})'\n else:\n textauthor = ''\n return texttitle+textauthor\n\ndef ScopusAuthQuery(authorfirstname, authorlastname, initial=False):\n '''Scopus query for searching author'''\n lastname = clean_str(authorlastname, case=\"lower\", unicode=False)\n firstname = clean_str(authorfirstname, case=\"lower\", unicode=False)\n nameinitial = firstname[0].lower()\n textquery = {True: f'AUTHLASTNAME(\"{lastname}\") AND AUTHFIRST({nameinitial})',\n False: f'AUTHLASTNAME(\"{lastname}\") AND AUTHFIRST({firstname})'}\n return textquery[initial]\n\ndef ScopusDataShort(author):\n '''Scopus data for a searched author's'''\n data = {'scopus-id':author.identifier, 'first name':author.given_name, 'last name': author.surname,\n 'full name': f'{author.given_name} {author.surname}', 'documents': author.document_count, \n 
'citations': author.cited_by_count, 'h-index':author.h_index}\n return data\n\ndef ScopusDataLong(short, author):\n '''Scopus data for a searched author's'''\n data = {'scopus-id':author.identifier, 'first name':author.given_name, 'last name': author.surname,\n 'full name': f'{author.given_name} {author.surname}', 'affiliation': short.affiliation, \n 'location' : f'{short.city}, {short.country}', 'areas':short.areas,\n 'documents': author.document_count, 'citations': author.cited_by_count, 'h-index':author.h_index}\n return data","repo_name":"JupyterPER/Data-analysis-conferences","sub_path":"notebooks/PybliometricsScopus.py","file_name":"PybliometricsScopus.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13609035067","text":"def tag(*tags):\n \"\"\"Decorator to add tags to a test class or method.\"\"\"\n def decorator(obj):\n if len(getattr(obj, '__bases__', [])) > 1:\n setattr(obj, 'tags', set(tags).union(*[\n set(base.tags)\n for base in obj.__bases__\n if hasattr(base, 'tags')\n ]))\n elif hasattr(obj, 'tags'):\n obj.tags = obj.tags.union(tags)\n else:\n setattr(obj, 'tags', set(tags))\n return obj\n return decorator\n","repo_name":"EOxServer/eoxserver","sub_path":"eoxserver/testing/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"81"} +{"seq_id":"15210620717","text":"import requests\nimport json\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import RadioButtons, Slider\nfrom sklearn.svm import SVR\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\nclass Handler:\n def __init__(self):\n # plt.ion()\n self.url = \"https://seasweb.azurewebsites.net/data.json\"\n self.filter_const = 25\n self.by1 = None\n self.by2 = None\n self.py1 = None\n self.py2 = None\n self.unfiltered_series = None\n self.filtered_series = None\n self.data_to_plot = None\n self.subplot_axes= None\n self.radio = None\n self.slider = None\n self.func = None\n self.df = None\n\n # Downloading data and creating dataset\n def create_dataset(self):\n # Loading dataset from given url\n rawData = json.loads(requests.get(self.url).text)\n self.df = pd.DataFrame(rawData)\n # Creating numpy arrays for further processing\n self.by1 = self.df['BY1'].astype('float').to_numpy()\n self.by2 = self.df['BY2'].astype('float').to_numpy()\n self.py1 = self.df['PY1'].astype('float').to_numpy()\n self.py2 = self.df['PY2'].astype('float').to_numpy()\n # Creating lists of series\n self.unfiltered_series = [self.by1, self.by2, self.py1, self.py2]\n self.filtered_series = [self.__filter(self.by1), self.__filter(self.by2), self.__filter(self.py1), self.__filter(self.py2)]\n \n # Plotting all series together on one figure\n def plot_all(self):\n # Testing if dataset was created\n if self.df is None:\n raise ValueError(\"Dataset is not created\")\n \n # Plotting and setting up graph\n ax1 = self.df.plot(x='category', figsize=(10,9))\n ax1.title.set_text('European Energy Exchange (EEX) data for years 2021 and 2022')\n ax1.set_ylabel('Price[€]', fontsize='large', fontweight='bold')\n ax1.set_xlabel('Date', fontsize='large', fontweight='bold')\n ax1.legend([\"BY1 BaseLoad 2021 in €/MWh\", \"BY2 BaseLoad 2022 in €/MWh\", \"PY1 PeakLoad 2021 in €/MWh\", \"PY2 PeakLoad 2022 in €/MWh\", \"CO2 - Price of emission allowances in 
€/tonne\"])\n plt.draw()\n\n # Plotting all series separately on subplots\n def plot_by_one(self):\n # Testing if dataset was created\n if self.df is None:\n raise ValueError(\"Dataset is not created\")\n\n # Plotting and setting up graph\n fig2, self.subplot_axes = plt.subplots(2, 2, figsize=(10,9))\n fig2.subplots_adjust(left=0.3, wspace=0.2, hspace=0.3)\n fig2.suptitle('All series separately')\n fig2.text(0.25, 0.5, 'Price[€/MWh]', rotation='vertical', verticalalignment='center', fontsize='large', fontweight='bold')\n fig2.text(0.6, 0.03, 'Date', fontsize='large', horizontalalignment='center', fontweight='bold')\n fig2.text(0.05, 0.1, 'Use slider only when filtered series is selected\\nSlider for changing filter constant (filtering rate)', rotation='vertical',fontsize='large', fontweight='bold')\n \n # Reshaping list of axes for usage in for loop\n self.subplot_axes = np.reshape(self.subplot_axes, 4, 'F')\n # Updating which data will be plotted\n self.data_to_plot = self.unfiltered_series\n # Updating subplots by class function\n self.__subplot_update()\n\n # Creating radio button for changing which series to plot\n rax = plt.axes([0.05, 0.7, 0.15, 0.15])\n self.radio = RadioButtons(rax, ('Unfiltered series', 'Filtered series'))\n self.func = {'Unfiltered series': self.unfiltered_series, 'Filtered series': self.filtered_series}\n\n # Creating slider for changing filter constant\n axSlider = plt.axes([0.1, 0.1, 0.05, 0.5])\n self.slider = Slider(axSlider, 'Slider', valmin=1, valmax=125, valinit=25, orientation='vertical', valfmt='%d')\n\n # Assign a function handler to a button and slider\n self.radio.on_clicked(self.__radioButton_update)\n self.slider.on_changed(self.__slider_update)\n plt.draw()\n\n # Printing max values\n def print_max_values(self):\n # Printing 5 highest values to console for every series\n print(\"--------------------------------------------------\")\n print(\"Highest values of series [BY1, BY2, PY1, PY2] :\\n\")\n # Iterating throught all series\n for s in self.unfiltered_series:\n # Indirect partition\n ind = np.argpartition(-s, 5)[:5]\n a = s[ind]\n # Swapping order\n a = -np.sort(-a)\n print(a)\n print(\"--------------------------------------------------\")\n\n # Regression analysis\n def nonlinear_regression(self):\n # Prepocessing data for reggresion\n fig3 = plt.figure(3, figsize=(10,9))\n X = [i for i in range(len(self.unfiltered_series[0]))]\n X = np.asfarray(X).reshape(-1, 1)\n y = self.unfiltered_series[0]\n \n # Fit regression model\n svr_rbf = SVR(kernel='rbf', C=100, gamma=0.1, epsilon=.1)\n\n # Plot graph of reggresion\n plt.plot(X, svr_rbf.fit(X, y).predict(X), color='m', lw=2,\n label='{} model'.format('RBF'))\n\n # Plot points\n plt.scatter(X,y, s=10)\n\n # Displaying information\n a = svr_rbf.score(X, y)\n fig3.suptitle('RBF regression')\n fig3.text(0.5, 0.9, 'Coefficient of determination R^2 is %s'%(a), horizontalalignment='center', fontsize='large', fontweight='bold')\n plt.ylabel('Price[€]', fontsize='large', fontweight='bold')\n plt.xlabel('Days', fontsize='large', fontweight='bold')\n plt.draw()\n\n # The function which handle subplots updating every time data to plot or slider value was changed\n def __subplot_update(self):\n self.__plot_on_axis()\n\n # Positioning x label elements\n for a in self.subplot_axes:\n plt.setp(a.get_xticklabels(), rotation=30, ha='right')\n\n # Naming each subplot\n self.subplot_axes[0].title.set_text('BY1')\n self.subplot_axes[1].title.set_text('BY2')\n self.subplot_axes[2].title.set_text('PY1')\n 
self.subplot_axes[3].title.set_text('PY2')\n\n plt.draw()\n\n # Handler for radio button\n def __radioButton_update(self, label):\n # Updating which data will be plotted\n self.data_to_plot = self.func[label]\n # Updating subplots by class function\n self.__subplot_update()\n # Reseting slider\n self.slider.reset()\n\n # Handler for slider\n def __slider_update(self, val):\n # Slider is working only in filtered series state\n if self.radio.value_selected == 'Filtered series':\n # Updating filter constant\n self.filter_const = int(self.slider.val)\n # Updating list of filtered series\n self.filtered_series = [self.__filter(self.by1), self.__filter(self.by2), self.__filter(self.py1), self.__filter(self.py2)]\n # Updating which data to plot\n self.data_to_plot = self.filtered_series\n # Updating subplots by class function\n self.__subplot_update()\n\n # The function for plotting columns of dataset to separated subplots\n def __plot_on_axis(self):\n # Each axis, one graph\n if len(self.data_to_plot) != len(self.subplot_axes):\n raise ValueError('This function plot one column of dataset stored in array on one axis. Data array length is not the same as axes array length.')\n i = 0\n for a in self.subplot_axes:\n a.clear()\n a.plot(pd.to_datetime(self.df['category']), self.data_to_plot[i])\n i += 1\n\n # The filter, it uses Furier transformation\n # ???!Malokedy vyuzijem v praxi nieco co som sa naucil v skole, ale toto je jedna z tych veci ktore som pochopil a pouzil!???\n def __filter(self, input):\n furrier_transform = np.fft.fft(input)\n shifted_furrier_transform = np.fft.fftshift(furrier_transform)\n HP_filter = np.zeros(len(shifted_furrier_transform), dtype=int)\n n = int(len(HP_filter))\n HP_filter[int(n/2) - self.filter_const : int(n/2) + self.filter_const] = 1\n output = shifted_furrier_transform * HP_filter\n output = abs(np.fft.ifft(output))\n\n return output \npass\n\n\nhandler = Handler()\nhandler.create_dataset()\nhandler.plot_all()\nhandler.plot_by_one()\nhandler.nonlinear_regression()\nhandler.print_max_values()\nplt.show()","repo_name":"LadislavBrecka/SE_EEX_Data","sub_path":"task_2_OOP_file.py","file_name":"task_2_OOP_file.py","file_ext":"py","file_size_in_byte":8563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13699287087","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash\nfrom flask_app.models import user\n\nclass Character:\n db_name = \"character_project_schema\"\n\n def __init__(self, data):\n self.id = data[\"id\"]\n self.name = data[\"name\"]\n self.race = data[\"race\"]\n self.classname = data[\"classname\"]\n self.level = data[\"level\"]\n self.created_at = data[\"created_at\"]\n self.updated_at = data[\"updated_at\"]\n self.user = None\n\n @classmethod\n def add_character(cls, data):\n query = \"\"\"\n INSERT INTO characters\n (name, race, classname, level, user_id)\n VALUES\n (%(name)s, %(race)s, %(classname)s, %(level)s, %(user_id)s);\n \"\"\"\n return connectToMySQL(cls.db_name).query_db(query, data)\n\n @classmethod\n def get_all_characters(cls):\n query = \"\"\"\n SELECT * FROM characters\n JOIN users\n ON characters.user_id = users.id;\n \"\"\"\n results = connectToMySQL(cls.db_name).query_db(query)\n if len(results) == 0:\n return []\n else:\n all_character_objects = []\n for character_dictionary in results:\n character_obj = cls(character_dictionary)\n # Grab the user's info\n user_dictionary = {\n \"id\": character_dictionary[\"users.id\"],\n 
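# column names duplicated by the JOIN (id, created_at, updated_at) come back prefixed with 'users.'\n                    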
\"first_name\": character_dictionary[\"first_name\"],\n \"last_name\": character_dictionary[\"last_name\"],\n \"email\": character_dictionary[\"email\"],\n \"password\": character_dictionary[\"password\"],\n \"created_at\": character_dictionary[\"users.created_at\"],\n \"updated_at\": character_dictionary[\"users.updated_at\"]\n }\n # Create the User object\n user_obj = user.User(user_dictionary)\n # Link this User to this character\n character_obj.user = user_obj\n # Add this character to the list of all character objects\n all_character_objects.append(character_obj)\n return all_character_objects\n\n @classmethod\n def get_one_character(cls, data):\n query = \"\"\"\n SELECT * FROM characters\n JOIN users \n ON characters.user_id = users.id\n WHERE characters.id = %(id)s;\n \"\"\"\n results = connectToMySQL(cls.db_name).query_db(query, data)\n if len(results) == 0:\n return None\n else:\n # create a variable for results[index] for clarity\n character_dictionary = results[0]\n character_obj = cls(character_dictionary)\n # Grab the user's info\n user_dictionary = {\n \"id\": character_dictionary[\"users.id\"],\n \"first_name\": character_dictionary[\"first_name\"],\n \"last_name\": character_dictionary[\"last_name\"],\n \"email\": character_dictionary[\"email\"],\n \"password\": character_dictionary[\"password\"],\n \"created_at\": character_dictionary[\"users.created_at\"],\n \"updated_at\": character_dictionary[\"users.updated_at\"]\n }\n # Create the User object\n user_obj = user.User(user_dictionary)\n # Link this User to this character\n character_obj.user = user_obj\n return character_obj\n\n @classmethod\n def edit_character(cls, data):\n query= \"\"\"\n UPDATE characters SET\n name = %(name)s,\n race = %(race)s,\n classname = %(classname)s,\n level = %(level)s\n WHERE\n id = %(id)s;\n \"\"\"\n return connectToMySQL(cls.db_name).query_db(query, data)\n\n @classmethod\n def delete_character(cls, data):\n query= \"DELETE FROM characters WHERE id = %(id)s;\"\n return connectToMySQL(cls.db_name).query_db(query, data)\n\n @staticmethod\n def validate_character(form_data):\n is_valid = True\n if len(form_data[\"name\"]) < 2:\n flash (\"Name must be 2 or more characters\")\n is_valid = False\n if len(form_data[\"race\"]) < 2:\n flash (\"Race must be 2 or more characters\")\n is_valid = False\n if len(form_data[\"classname\"]) < 2:\n flash (\"Class must be 2 or more characters\")\n is_valid = False\n if len(form_data[\"level\"]) < 1:\n flash (\"Level must be between 1 and 20\")\n is_valid = False\n return is_valid","repo_name":"hensleymd/character-project","sub_path":"flask_app/models/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15959349844","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n Author: qinLess\n File: handler.py\n Time: 2021/5/17 下午6:07\n-------------------------------------------------\n Change Activity: 2021/5/17 下午6:07\n-------------------------------------------------\n Desc: \n\"\"\"\nfrom magical.utils import load_objects\n\n\nclass PipelineHandler(object):\n\n def __init__(self, spider, **kwargs):\n self.spider = spider\n self.kwargs = kwargs\n self.logger = spider.logger\n self.settings = spider.settings\n\n handler_manager_cls = self.settings['PIPELINE_MIDDLEWARE_MANAGER_PATH']\n self.middleware = load_objects(handler_manager_cls)(spider)\n\n def pipeline(self, item, **kwargs):\n 
return self.middleware.pipeline(item, **kwargs)\n","repo_name":"qinLess/magical","sub_path":"magical/sync_spider/middleware/pipeline/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"74202677065","text":"class Solution:\n def rotateTheBox(self, box: List[List[str]]) -> List[List[str]]:\n m=len(box)\n n=len(box[0])\n for row in range(m):\n down_empty_pos=n-1\n for col in range(n-1,-1,-1):\n if box[row][col]=='#':\n # move the small box to next empty positon\n # empty this place\n box[row][col]='.'\n box[row][down_empty_pos]='#'\n down_empty_pos-=1\n elif box[row][col]=='*':\n # obstacles\n down_empty_pos=col-1\n\n # now rotate and return the new matrix\n # number of rows becomes number of columns and vice versa\n returnMatrix=[[None]*m for _ in range(n)]\n\n for i in range(n):\n for j in range(m):\n # clockwise 90 degree rotation\n returnMatrix[i][j]=box[m-1-j][i]\n return returnMatrix","repo_name":"akshu15/LeetCode","sub_path":"1861-rotating-the-box/1861-rotating-the-box.py","file_name":"1861-rotating-the-box.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14676435044","text":"import plotly.graph_objects as go\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\n\nfrom .utils import *\nimport math\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom itertools import islice\n\n\nPOPULATION = [197385, 276368, 729997, 266112, 319004, 1942044, 382604, 146717, 92518, 44561, 90311, 420082, 422993, 494796, 589400, 1507070, 242399]\n\nDENSITY = [8.9, 2.8, 38.8, 7.4, 31.2, 3889.8, 12.4, 2.5, 0.4, 0.1, 4.4, 27.9, 1710.9, 39.8, 28.4, 135.4, 35.0]\nICU_CAP = 1000\nHEADERS= [\"Region\", \"Total Number of Cases\", \"Date of Infection\"]\n\nNHOOD_MAP = {\n 'Côte-des-Neiges–Notre-Dame-de-Grâce' : 'Côte-des-Neiges-Notre-Dame-de-Grâce',\n 'Plateau-Mont-Royal' : 'Le Plateau-Mont-Royal',\n \"Baie-D'Urfé\" : \"Baie-d'Urfé\",\n 'Mercier–Hochelaga-Maisonneuve' : 'Mercier-Hochelaga-Maisonneuve',\n 'Côte-Saint-Luc' : 'Côte-Saint-Luc',\n 'Kirkland' : 'Kirkland',\n \"L'Île-Bizard–Sainte-Geneviève\" :\"L'Île-Bizard-Sainte-Geneviève\",\n 'Pierrefonds–Roxboro' :'Pierrefonds-Roxboro',\n 'Rivière-des-Prairies–Pointe-aux-Trembles' : 'Rivière-des-Prairies-Pointe-aux-Trembles',\n 'Rosemont–La Petite Patrie' : 'Rosemont-La Petite-Patrie',\n 'Saint-Léonard' : 'Saint-Léonard',\n 'Senneville' : 'Senneville',\n 'Ahuntsic–Cartierville' :'Ahuntsic-Cartierville',\n 'Sud-Ouest' : 'Le Sud-Ouest',\n 'Villeray–Saint-Michel–Parc-Extension' :'Villeray-Saint-Michel-Parc-Extension' \n \n}\n\ndef plot_total_cases(stacked_df, df, dates):\n total_cases = list(df.iloc[19])[1:]\n\n fig = px.bar(stacked_df, x=\"Date of Infection\", y=\"Total Number of Cases\", color = \"Region\", color_discrete_sequence= px.colors.qualitative.Light24)\n fig.add_trace(go.Scatter(x=dates, y=total_cases,\n mode='lines',\n name='Total'))\n fig.update_layout(legend_orientation=\"h\", xaxis = {\"dtick\" : 10})\n fig.update_layout(legend=dict( y=-0.45))\n\n fig.show()\n with open('plotly/total_cases.json', 'w') as f:\n f.write(fig.to_json())\n\ndef plot_region_cases(stacked_df, dates ):\n all_regions = stacked_df.Region.unique()\n regions_cases = [stacked_df.loc[stacked_df['Region'] == r][\"Total Number of Cases\"].to_list() for r in all_regions]\n\n fig = make_subplots(rows=9, cols=2, 
subplot_titles=tuple(all_regions[:17]), shared_xaxes=True, shared_yaxes = True)\n for i,m in enumerate(regions_cases[:17]) :\n\n r = i//2+1\n c = i%2+1\n\n fig.add_trace(\n go.Scatter(x=dates, y=m, mode = \"lines\"),\n row=r, col=c\n )\n \n fig.update_layout(showlegend=False, title_text=\"Infection over time by Region\", height=1000)\n fig.show()\n\n with open('plotly/infections_by_region.json', 'w') as f:\n f.write(fig.to_json())\n\n\ndef plot_hospitilazation(df, dates):\n hospitalization = list(df.loc[[28]].values)[0][31:]\n icu = list(df.loc[[29]].values)[0][31:]\n\n fig = go.Figure()\n\n fig.add_trace(\n go.Scatter(x= dates[30:], y=hospitalization, name=\"Hospitalized\"),\n\n )\n\n fig.add_trace(\n go.Scatter(x=dates[30:], y=icu, name=\"ICU\"),\n )\n\n\n fig.add_trace(\n go.Scatter(x=dates[30:], y=[ICU_CAP]*len(dates[30:]), name=\"ICU Capacity\" , mode='lines',),\n )\n fig.update_layout(title='Hospitalization',\n xaxis_title='Date',\n yaxis_title='Number of Patients')\n\n fig.show()\n\n with open('plotly/hospitilization.json', 'w') as f:\n f.write(fig.to_json())\n\ndef plot_new_cases(df, dates):\n new_cases = list(df.iloc[20])[2:]\n fiveday_avg = nday_avg(new_cases, 5)\n\n fig = go.Figure(data=go.Scatter(x=dates[1:], y=new_cases,\n mode='lines',\n name='New Cases'))\n\n fig.add_trace(go.Scatter(x=dates[8:], y=fiveday_avg,\n mode='lines',\n name='Five Day Average', visible = False))\n\n\n fig.update_layout(\n updatemenus=[\n dict(\n #type=\"buttons\",\n direction=\"down\",\n showactive = True,\n #active = 0,\n x=1,\n y=1.2,\n buttons=list([\n dict(label=\"New Cases\",\n method=\"update\",\n args=[{\"visible\": [True,False ]},\n {\"title\": 'New Cases', \n \"xaxis.title\" : 'Date',\n \"yaxis.title\" : 'Number of Cases',\n \"yaxis.type\": \"linear\",\n #\"xaxis.type\": \"linear\",\n \"annotations\" : [],\n }] ),\n dict(label=\"5 day average\",\n method=\"update\",\n args=[{\"visible\": [False,True]},\n {\"title\": 'Average Cases in Last 5 days', \n \"xaxis.title\" : 'Date',\n \"yaxis.title\" : 'Number of Cases',\n \"yaxis.type\": \"linear\",\n #\"xaxis.type\": \"linear\",\n \"annotations\" : [],\n }] ),\n\n ]),\n )\n ])\n\n fig.update_layout(title='New Cases Each Day',\n xaxis_title='Date',\n yaxis_title='Number of New Cases', xaxis = {\"dtick\" : 10})\n fig.show()\n with open('plotly/new_cases.json', 'w') as f:\n f.write(fig.to_json())\n\n\ndef plot_testing(df, dates):\n positive = list(df.iloc[20])[3:]\n negative = list(df.iloc[22])[3:] \n\n total_negative = list(np.cumsum(negative))\n total_positive = list(np.cumsum(positive))\n total_tests = total_negative+total_positive\n positive_per_test = [ pos/total for pos, total in zip(total_positive, total_tests)] \n\n fig = go.Figure(data=[\n go.Bar(name='Negative', x=dates[2:], y=negative[2:] ,visible = False),\n go.Bar(name='Positive', x=dates[2:], y=positive[2:], visible = False)])\n\n fig.update_layout(barmode='stack')\n\n fig.add_trace(go.Scatter(x=dates[3:], y=positive_per_test[3:],\n mode='lines',\n name='Positive Tests per Total Tests (Cumulative Average)'))\n\n fig.add_trace(go.Scatter(x=dates[3:], y=total_tests[3:],\n mode='lines',\n name='Total Tests', visible = False))\n\n fig.update_layout(\n xaxis_title='Date',\n yaxis_title='Positive Tests per Total Test', xaxis = {\"dtick\" : 10})\n\n fig.update_layout(\n updatemenus=[\n dict(\n #type=\"buttons\",\n direction=\"down\",\n showactive = True,\n #active = 0,\n x=1,\n y=1.2,\n buttons=list([\n dict(label=\"Positive Tests Per\",\n method=\"update\",\n args=[{\"visible\": 
[False,False, True, False]},\n {\"title\": 'Positive Tests per Total Tests (Cumulative Average)', \n \"xaxis.title\" : 'Date',\n \"yaxis.title\" : 'Positive Tests per Total Test',\n \"yaxis.type\": \"linear\",\n #\"xaxis.type\": \"linear\",\n \"annotations\" : [],\n }] ),\n dict(label=\"Test\",\n method=\"update\",\n args=[{\"visible\": [True,True, False, False]},\n {\"title\": 'Total Tests', \n \"xaxis.title\" : 'Date',\n \"yaxis.title\" : 'Number of Tests',\n \"yaxis.type\": \"linear\",\n #\"xaxis.type\": \"linear\",\n \"annotations\" : [],\n }] ),\n\n ]),\n )\n ])\n\n fig.show()\n with open('plotly/tested.json', 'w') as f:\n f.write(fig.to_json()) \n\n\ndef plot_breakdown(df, dates):\n total = list(df.iloc[19][1:])\n dead = list(df.iloc[24])[1:]\n recovered = list(df.iloc[25])[1:]\n active = [ t-r-d for t,r,d in zip(total, recovered, dead)]\n\n dead_percent = [d/t*100 for d,t in zip(dead, total)]\n recovered_percent = [r/t*100 for r,t in zip(recovered, total)]\n active_percent = [a/t*100 for a,t in zip(active, total)]\n\n\n fig = go.Figure()\n\n fig.add_trace(\n go.Scatter(x= dates, y=dead, name=\"Deceased\", visible = True),\n\n )\n fig.add_trace(\n go.Scatter(x=dates, y=dead_percent, name=\"Deceased Percent\", visible = False),\n )\n fig.add_trace(\n go.Scatter(x= dates, y=recovered, name=\"Recovered\", visible = True),\n )\n fig.add_trace(\n go.Scatter(x=dates, y=recovered_percent, name=\"Recovered Percent\", visible = False),\n )\n fig.add_trace(\n go.Scatter(x= dates, y=active, name=\"Active\"),\n )\n fig.add_trace(\n go.Scatter(x=dates, y=active_percent, name=\"Active Percent\", visible = False),\n )\n fig.update_layout(\n updatemenus=[\n dict(\n #type=\"buttons\",\n direction=\"down\",\n showactive = True,\n #active = 0,\n x=1,\n y=1.5,\n buttons=list([\n dict(label=\"Current Active\",\n method=\"update\",\n args=[{\"visible\": [True,False,True, False, True, False]},\n {\"title\": 'Active, Deceased and Recovered Cases', \n \"xaxis.title\" : 'Date',\n \"yaxis.title\" : 'Number of Cases',\n \"yaxis.type\": \"linear\",\n #\"xaxis.type\": \"linear\",\n \"annotations\" : [],\n }] ),\n dict(label=\"Percentage Active\",\n method=\"update\",\n args=[{\"visible\": [False,True,False, True, False, True]},\n {\"title\": 'Percentage of Active, Deceased and Recovered Cases', \n \"xaxis.title\" : 'Date',\n \"yaxis.title\" : 'Percentage of Cases',\n \"yaxis.type\": \"linear\",\n #\"xaxis.type\": \"linear\",\n \"annotations\" : [],\n }] ),\n\n ]),\n )\n ])\n\n\n fig.update_layout(\n title_text=\"Active Cases\",\n )\n fig.update_layout(legend_orientation=\"h\")\n fig.update_layout(legend=dict( y=-0.4), xaxis = {\"dtick\" : 10})\n fig.show()\n\n with open('plotly/cases.json', 'w') as f:\n f.write(fig.to_json())\n\ndef plot_map_per100k(lastday_df, quebec):\n\n fig = px.choropleth(lastday_df, geojson=quebec, color=\"Cases per 100k\", locations=\"res_nm_reg\", \n featureidkey= \"properties.res_nm_reg\", color_continuous_scale='Blues',\n projection='conic equidistant'\n )\n fig.update_geos(fitbounds=\"locations\", visible=False)\n fig.update_layout(title='Total Cases per 100k')\n fig.show()\n with open('../plotly/map_population.json', 'w') as f:\n f.write(fig.to_json())\n\ndef plot_exponential(df, stacked_df):\n\n total = list(df.iloc[19][1:])\n\n region_cases = []\n for i in range(17):\n temp = list(df.iloc[i][1:])\n ix_0 = [i for i, t in enumerate(temp) if t >0]\n if len(ix_0) >0 : \n temp = temp[ix_0[0]:]\n else : \n temp = []\n region_cases.append(temp)\n\n region_cases_per_100k = []\n for i, r in 
enumerate(region_cases): \n if r : \n region_cases_per_100k.append( [ 100000*x/POPULATION[i] for x in r ])\n else:\n region_cases_per_100k.append([])\n\n total_cases_per_100k = [ 100000*x/8164361 for x in total ]\n regions = stacked_df.Region.unique()\n\n past_week = []\n for i,_ in enumerate(total[7:]):\n past_week.append(total[i:i+7]) \n past_week_sum = [100000*(w[-1]-w[0])/8164361 for w in past_week]\n\n region_week = []\n for i, r in enumerate(region_cases): \n if len(r)>=7 :\n temp = []\n for r0,r7 in zip(r,r[7:]):\n temp.append(100000*(r7-r0)/POPULATION[i])\n region_week.append( temp)\n else:\n region_week.append([]) \n\n fig = go.Figure()\n\n color = ['#EB89B5', '#5b7bd6','#ff6960', '#d11411', '#00b159', '#00aedb', '#f37735', '#ffc425',\n '#a200ff', '#f47835', '#d41243', '#8ec127', '#feda75', '#fa7e1e', '#962fbf', '#fa9e1e', \n '#800000', '#FFFF00', '#7D3C98']\n\n fig.add_trace(go.Scatter(y=total_cases_per_100k,name = \"All Quebec\", mode = 'lines+markers'))\n\n for r_c, r in zip(region_cases_per_100k, regions):\n fig.add_trace(go.Scatter(y=r_c, name = r, mode = 'lines+markers'))\n\n # fig.update_layout(\n # title_text='Exponential Growth since First Infection', # title of plot\n # xaxis_title_text='Date since first infection', # xaxis label\n # yaxis_title_text='Cases per 100k (log scale)', # yaxis label\n\n # )\n\n\n fig.add_trace(go.Scatter(x=total_cases_per_100k[7:], y=past_week_sum,name = \"All Quebec\", mode = 'lines+markers', visible=False,))\n\n\n for r_100, r_c, r in zip(region_cases_per_100k, region_week, regions):\n fig.add_trace(go.Scatter(x =r_100[7:] , y=r_c, name = r, mode = 'lines+markers', visible=False))\n\n fig.update_layout(\n xaxis_title_text='Date since 1st infection', # xaxis label\n yaxis_title_text='Cases per 100k (log scale)', # yaxis label\n\n )\n fig.update_layout(\n updatemenus=[\n dict(\n #type=\"buttons\",\n direction=\"down\",\n showactive = True,\n #active = 0,\n x=1,\n y=1.5,\n buttons=list([\n dict(label=\"Growth per Date\",\n method=\"update\",\n args=[{\"visible\": [True]*18+ [False]*18},\n {\"title\": 'Exponential Growth since First Infection', \n \"xaxis.title\" : 'Days since first infection',\n \"yaxis.title\" : 'Cases per 100k (log scale)',\n \"yaxis.type\": \"linear\",\n \"xaxis.type\": \"linear\",\n \"annotations\" : [],\n }] ),\n dict(label=\"Growth per total Cases\",\n method=\"update\",\n args=[{\"visible\": [False]*18+ [True]*18},\n {\"title\": 'Exponential Growth per total cases',\n \"xaxis.title\" : 'Cases per 100k ',\n \"yaxis.title\" : 'New Cases per 100k in last week',\n \"yaxis.type\": \"linear\",\n \"xaxis.type\": \"linear\",\n }]),\n dict(label=\"Growth per Date Log Scale\",\n method=\"update\",\n args=[{\"visible\": [True]*18+ [False]*18},\n {\"title\": 'Exponential Growth since First Infection', \n \"xaxis.title\" : 'Days since first infection',\n \"yaxis.title\" : 'Cases per 100k (log scale)',\n \"yaxis.type\": \"log\",\n \"xaxis.type\": \"linear\",\n \"annotations\" : [],\n }] ),\n dict(label=\"Growth per total Cases Log Scale\",\n method=\"update\",\n args=[{\"visible\": [False]*18+ [True]*18},\n {\"title\": 'Exponential Growth per total cases',\n \"xaxis.title\" : 'Cases per 100k (log scale)',\n \"yaxis.title\" : 'New Cases per 100k in last week(log scale)',\n \"yaxis.type\": \"log\",\n \"xaxis.type\": \"log\",\n }]),\n ]),\n )\n ])\n\n\n fig.update_layout(\n title_text=\"Exponential\",\n )\n fig.update_layout(legend_orientation=\"h\")\n fig.update_layout(legend=dict( y=-0.4))\n\n\n fig.show()\n\n with 
open('plotly/exponential.json', 'w') as f:\n        f.write(fig.to_json())\n\n\ndef plot_montreal_nhood(montreal_nhood, montreal_nhood_geojson):\n\n    keys = list(montreal_nhood.keys())\n\n    montreal_map_nhood = {}\n    # montreal_rates = {}\n\n    for key, val in montreal_nhood.items():\n        stripped = key.strip()\n        if stripped in NHOOD_MAP:\n            montreal_map_nhood[NHOOD_MAP[stripped]] = int(val[0].replace(\",\", \"\").replace(\"< \", \"\").replace(\"<\", \"\").replace(\" \", \"\"))\n        else:\n            montreal_map_nhood[stripped] = int(val[0].replace(\",\", \"\").replace(\"< \", \"\").replace(\"<\", \"\").replace(\" \", \"\"))\n    # del montreal_map_nhood['Total à Montréal']\n    # del montreal_map_nhood['Territoire à confirmer²']\n    del montreal_map_nhood['Total for Montréal']\n    del montreal_map_nhood['Territory to be confirmed2']\n    # montreal_rates[key] = float(val[1].replace(\",\", \"\"))\n\n    montreal_df = pd.DataFrame(montreal_map_nhood.items(), columns=['Neighbourhoods', 'Cases'])\n\n    fig = px.choropleth(montreal_df, geojson=montreal_nhood_geojson, color=\"Cases\", locations='Neighbourhoods',\n                        featureidkey=\"properties.NOM\",\n                        projection=\"mercator\"\n                        )\n    fig.update_geos(fitbounds=\"locations\", visible=False)\n    fig.update_layout(title='Cases per Neighbourhood')\n    fig.show()\n    with open('plotly/map_montreal_nhood.json', 'w') as f:\n        f.write(fig.to_json())\n\n\ndef plot_age(age_deaths, age_case):\n    del age_deaths[\"Âge à déterminer\"]\n    age_deaths_map = {}\n    for age, deaths in age_deaths.items():\n        if age == 'Moins de 30 ans':\n            key = '<30'\n        elif age == '90 ans et plus':\n            key = '>90'\n        else:\n            key = age.strip(\" ans\")\n        age_deaths_map[key] = string_to_float(deaths)\n    deaths_ages_df = pd.DataFrame(age_deaths_map.items(), columns=['Ages', 'Deaths (%)'])\n\n    del age_case['Âge à déterminer']\n    age_cases_map = {}\n    for age, deaths in age_case.items():\n        if age == '90 ans ou plus':\n            key = '>90'\n        else:\n            key = age.strip(\" ans\")\n        age_cases_map[key] = float(deaths.replace(\",\", \".\"))\n    ages_df = pd.DataFrame(age_cases_map.items(), columns=['Ages', 'Cases (%)'])\n\n    fig = go.Figure([go.Bar(x=deaths_ages_df[\"Ages\"], y=deaths_ages_df['Deaths (%)'])])\n\n    fig.add_trace(\n        go.Bar(x=ages_df[\"Ages\"], y=ages_df['Cases (%)'], visible=False),\n    )\n\n    fig.update_layout(\n        xaxis_title_text='Age ranges',  # xaxis label\n        yaxis_title_text='Percentage',  # yaxis label\n    )\n\n    fig.update_layout(\n        updatemenus=[\n            dict(\n                # type=\"buttons\",\n                direction=\"down\",\n                showactive=True,\n                # active = 0,\n                x=1,\n                y=1.5,\n                buttons=list([\n                    dict(label=\"Deaths Age\",\n                         method=\"update\",\n                         args=[{\"visible\": [True, False]},\n                               {\"title\": 'Deaths by Age',\n                                \"xaxis.title\": 'Age',\n                                \"yaxis.title\": 'Percentage',\n                                \"yaxis.type\": \"linear\",\n                                # \"xaxis.type\": \"linear\",\n                                \"annotations\": [],\n                                }]),\n                    dict(label=\"Cases Age\",\n                         method=\"update\",\n                         args=[{\"visible\": [False, True]},\n                               {\"title\": 'Cases by Age',\n                                \"xaxis.title\": 'Ages',\n                                \"yaxis.title\": 'Percentage of Cases',\n                                \"yaxis.type\": \"linear\",\n                                # \"xaxis.type\": \"linear\",\n                                \"annotations\": [],\n                                }]),\n                ]),\n            )\n        ])\n\n    fig.update_layout(\n        title_text=\"Deaths by Age\",\n    )\n    fig.update_layout(legend_orientation=\"h\")\n    fig.update_layout(legend=dict(y=-0.4))\n\n    fig.show()\n\n    with open('plotly/age.json', 'w') as f:\n
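# persist the figure as Plotly JSON, the same pattern the other plot helpers use\n        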
f.write(fig.to_json())","repo_name":"nickbent/covid-quebec-ontario","sub_path":"src/plot/plot_quebec.py","file_name":"plot_quebec.py","file_ext":"py","file_size_in_byte":20861,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"41262797107","text":"# -*- coding: utf-8 -*-\n# @Author: WuLC\n# @Date: 2017-09-20 14:15:53\n# @Last Modified by: WuLC\n# @Last Modified time: 2017-09-20 14:28:09\n\n\n# stack\n# use stack to store indices of left brace and star\n# pop left brace whenever meeting right brace, when left brace is empty, pop stars\n# finally check whether left brace if empty, else use stars to map with them\nclass Solution(object):\n def checkValidString(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n left_idx, star_idx = [], []\n for i in xrange(len(s)):\n if s[i] == '(':\n left_idx.append(i)\n elif s[i] == '*':\n star_idx.append(i)\n else:\n if left_idx:\n left_idx.pop()\n elif star_idx:\n star_idx.pop()\n else:\n return False\n while left_idx and star_idx:\n if left_idx[-1] > star_idx[-1]:\n break\n else:\n left_idx.pop()\n star_idx.pop()\n return len(left_idx) == 0\n \n \n ","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/678. Valid Parenthesis String.py","file_name":"678. Valid Parenthesis String.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"1424203767","text":"import pandas as pd\nimport os.path\nimport html\nimport spacy\nimport time\n\nfrom string import punctuation\nfrom langdetect import detect\nfrom google.cloud import translate_v2 as translate\n\n\n# data\nTICKETS = pd.read_csv(f'./data/tickets/tickets.csv')\nSTATUS_LOG = pd.read_csv(f'./data/tickets/status_log.csv')\n\n# google translate setup\nCREDENTIALS_PATH = './translation-credentials.json'\nCLIENT = (\n translate.Client.from_service_account_json(CREDENTIALS_PATH)\n if os.path.exists(CREDENTIALS_PATH)\n else None\n)\n\n# spacy setup\nNLP = spacy.load('de_core_news_md')\n\n\ndef preprocess(force=False, test=False, translate=False):\n \"\"\"Preprocess ticket data into a pandas dataframe ready for futher work.\"\"\"\n s = './data/messages.parquet'\n if force or test or not os.path.exists(s):\n messages = TICKETS[[\n 'ID',\n 'Bearbeiter',\n 'Angelegt Am',\n 'Kategorie ID',\n ]].dropna()\n messages = messages.rename(columns={\n 'ID': 'id',\n 'Bearbeiter': 'operator',\n 'Angelegt Am': 'timestamp',\n 'Kategorie ID': 'category',\n })\n if test:\n messages = messages[:100]\n ops = messages.groupby('operator')['operator'].agg(['count'])\n ops = ops.reset_index().sort_values('count', ascending=False)\n ops = list(ops['operator'][:-10])\n messages = messages[messages['operator'].apply(lambda x: x in ops)]\n messages['category'] = messages['category'].apply(lambda x: x.strip())\n messages = messages[messages['category'].str.len() > 0]\n messages['timestamp'] = pd.to_datetime(\n messages['timestamp'],\n infer_datetime_format=True,\n utc=True,\n )\n # messages['year'] = messages['timestamp'].apply(lambda x: x.year)\n messages['text'] = messages['id'].apply(\n lambda x: get_first_message(fetch_ticket(x)),\n )\n messages = messages.dropna()\n messages['text'] = messages['text'].apply(lambda x: clean(x))\n messages['language'] = messages['text'].apply(\n lambda x: detect_language(x),\n )\n messages['translated-text'] = messages.apply(\n lambda row: (\n row['text']\n if not translate or row['language'] == 'de'\n else 
translate_to_german(row['text'])\n ),\n axis=1,\n )\n '''\n messages['text'] = messages['text'].apply(\n lambda x: ' '.join(extract_keywords(x)),\n )\n '''\n messages = messages.reset_index(drop=True)\n messages.to_parquet(s)\n return messages\n return pd.read_parquet(s)\n\n\ndef fetch_ticket(identifier):\n \"\"\"Return data of ticket with given identifier as pandas dataframe.\"\"\"\n try:\n return pd.read_csv(f'./data/tickets/{identifier}.csv')\n except:\n return None\n\n\ndef get_first_message(ticket_data):\n \"\"\"Get first real message in ticket conversations.\n\n Sometimes there are weird first messages contained in the ticket CSVs,\n that start with SY-SYSID.\n\n \"\"\"\n if ticket_data is None:\n return None\n starts = ['SY-DBSYS', 'SY-SYSID']\n for index, row in ticket_data.iterrows():\n if not any([row['Text'].startswith(start) for start in starts]):\n # filter out test messages like `2000000151`: \"Es brennt\"\n if (\n len(row['Text']) < 100\n or row['Nachrichtentyp'].strip() != 'Beschreibung'\n ):\n return None\n return row['Text']\n\n\ndef clean(text):\n \"\"\"Clean text of any weird characters.\"\"\"\n words = text.replace('\\n', ' ').split()\n out = []\n for word in words:\n word = word.strip('/-_<>&')\n if word:\n out.append(word)\n return ' '.join(out)\n\n\ndef detect_language(text):\n \"\"\"Detect the language of the given text.\"\"\"\n return detect(text)\n\n\ndef translate_to_german(text):\n \"\"\"Translate text from any language to german via Google Translate API.\"\"\"\n assert CLIENT, 'no Google Translate credentials provided'\n time.sleep(0.2) # rate limiting\n s = CLIENT.translate(text, target_language='de')['translatedText']\n return html.unescape(s)\n\n\ndef extract_keywords(text):\n \"\"\"Extract and clean most important words in the text.\"\"\"\n tag = ['PROPN', 'ADJ', 'NOUN', 'VERB', 'ADV', 'NUM']\n doc = NLP(text.lower())\n result = []\n for token in doc:\n if(token.text in NLP.Defaults.stop_words or token.text in punctuation):\n continue\n if(token.pos_ in tag):\n result.append(token.lemma_)\n return result\n","repo_name":"josepquintana/tum-praktikum-ml","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10783289597","text":"#!/usr/bin/python\n\nimport yarp\nimport os\nimport time\nimport random\n\nyarp.Network.init();\n\nportOut = yarp.BufferedPortBottle();\n\nportOut.open(\"/testSender\");\n\n#os.system(\"yarp connect /ants/outUB /ctrl2/in/pos\");\n#os.system(\"yarp connect /ctrl/out/pos /3ds01/pos\");\n\nii = 0\nwhile ii<1 : \n\tbottleOutSend = portOut.prepare();\n\tbottleOutSend.clear(); \n\n\tfor x in range(0, 3): \n\t\tbottleOutList = bottleOutSend.addList();\n\t \n\t\tbottleTemp = bottleOutList.addList(); \n\t\tss = \"ID\" + \" \" + str(random.randint(10,99))\n\t\tbottleTemp.fromString(ss) #this creates a searchable yarp.Value (!)\n\t\tbottleTemp = bottleOutList.addList(); \n\t\tss = \"x\" + \" \" + str(random.random())\n\t\tbottleTemp.fromString(ss) #this creates a searchable yarp.Value (!)\n\t\tbottleTemp = bottleOutList.addList(); \n\t\tss = \"y\" + \" \" + str(random.random())\n\t\tbottleTemp.fromString(ss) #this creates a searchable yarp.Value (!)\n\n\tprint (\"Sending \", 
bottleOutSend.toString());\n\tportOut.write();\n\ttime.sleep(.1)\n\nportOut.close();\nyarp.Network.fini();\t\t\t\n","repo_name":"maryamsab/realact","sub_path":"testSenderPython/testSenderAnTS.py","file_name":"testSenderAnTS.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4788654019","text":"import audioop\r\nimport speech_recognition as sr\r\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\r\nimport torch\r\nimport pyttsx3\r\n\r\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-large\")\r\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-large\")\r\n\r\nengine = pyttsx3.init()\r\n\r\nfor voice in engine.getProperty(\"voices\"):\r\n print(voice)\r\n\r\nvoices = engine.getProperty(\"voices\")\r\n\r\nengine.setProperty(\"voice\", voices[1].id)\r\n\r\ndef main(Audio1):\r\n\r\n r = sr.Recognizer()\r\n\r\n step = 0\r\n\r\n engine.say(Audio1)\r\n engine.runAndWait()\r\n\r\n mic = sr.Microphone()\r\n\r\n with mic as source:\r\n r.adjust_for_ambient_noise(source)\r\n\r\n print(\"Please say something\")\r\n\r\n audio = r.listen(source)\r\n\r\n print(\"Recognizing Now .... \")\r\n\r\n\r\n # recognize speech using google\r\n\r\n try:\r\n\r\n mic_in = str(r.recognize_google(audio))\r\n \r\n # encode the new user input, add the eos_token and return a tensor in Pytorch\r\n new_user_input_ids = tokenizer.encode(mic_in + tokenizer.eos_token, return_tensors='pt')\r\n\r\n # append the new user input tokens to the chat history\r\n bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids\r\n\r\n # generated a response while limiting the total chat history to 1000 tokens, \r\n chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)\r\n\r\n # pretty print last ouput tokens from bot\r\n output = (\"{}\".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))\r\n\r\n main(output)\r\n \r\n\r\n\r\n except Exception as e:\r\n # print(\"Error : \" + str(e))\r\n pass\r\n return main\r\n\r\n\r\n\r\n\r\n # write audio\r\n # with open(\"recorded.wav\", \"wb\") as f:\r\n # f.write(audio.get_wav_data())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main(audioop)","repo_name":"emirvader/dialo","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6886498938","text":"import vk_api \nfrom vk_api.longpoll import VkEventType, VkLongPoll \nimport time \n \ntoken = vk_api.VkApi(token=\"токен\") \nlox = VkLongPoll(token) \n\ndef send(id, text, msgid, att): \n token.method('messages.send',{'chat_id' : id, 'message' : text, 'random_id' : 0, 'attachment' : att, 'reply_to':msgid })\n \nwhile True: \n for event in lox.listen(): \n if event.type == VkEventType.MESSAGE_NEW: \n if event.from_chat: \n idUser = event.user_id \n id = event.chat_id \n msg = event.text.lower() \n msgid = event.message_id\n textspl = msg.split()\n \n if msg.startswith(\"бармить\"):\n send(id, f\"Ты сбармил {' '.join(textspl[1:])}\",msgid, '')\n\n","repo_name":"Kerkerek/barmite","sub_path":"barmite.py","file_name":"barmite.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"69916902665","text":"import copy\nimport numpy as 
np\n\nfrom collections import defaultdict\nfrom ark_nlp.dataset.base._dataset import BaseDataset\n\n\nclass PRGCREDataset(BaseDataset):\n \"\"\"\n 用于PRGC Bert联合关系抽取任务的Dataset\n\n Args:\n data (:obj:`DataFrame` or :obj:`string`): 数据或者数据地址\n categories (:obj:`list`, optional, defaults to `None`): 数据类别\n is_retain_df (:obj:`bool`, optional, defaults to False): 是否将DataFrame格式的原始数据复制到属性retain_df中\n is_retain_dataset (:obj:`bool`, optional, defaults to False): 是否将处理成dataset格式的原始数据复制到属性retain_dataset中\n is_train (:obj:`bool`, optional, defaults to True): 数据集是否为训练集数据\n is_test (:obj:`bool`, optional, defaults to False): 数据集是否为测试集数据\n \"\"\" # noqa: ignore flake8\"\n\n def __init__(self, *args, **kwargs):\n super(PRGCREDataset, self).__init__(*args, **kwargs)\n self.sublabel2id = {\"B-H\": 1, \"I-H\": 2, \"O\": 0}\n self.oblabel2id = {\"B-T\": 1, \"I-T\": 2, \"O\": 0}\n\n def _get_categories(self):\n return sorted(list(set([triple_[3] for data_ in self.dataset for triple_ in data_['label']])))\n\n def _convert_to_dataset(self, data_df):\n\n dataset = []\n\n data_df['text'] = data_df['text'].apply(lambda x: x.strip())\n if not self.is_test:\n data_df['label'] = data_df['label'].apply(lambda x: eval(x))\n\n feature_names = list(data_df.columns)\n for index_, row_ in enumerate(data_df.itertuples()):\n\n dataset.append({\n feature_name_: getattr(row_, feature_name_)\n for feature_name_ in feature_names\n })\n\n return dataset\n\n def _convert_to_transfomer_ids(self, tokenizer):\n self.tokenizer = tokenizer\n\n if self.is_retain_dataset:\n self.retain_dataset = copy.deepcopy(self.dataset)\n\n features = []\n for (index_, row_) in enumerate(self.dataset):\n\n text = row_['text']\n\n if len(text) > self.tokenizer.max_seq_len - 2:\n text = text[:self.tokenizer.max_seq_len - 2]\n\n tokens = self.tokenizer.tokenize(text)\n token_mapping = self.tokenizer.get_token_mapping(text, tokens, is_mapping_index=False)\n index_token_mapping = self.tokenizer.get_token_mapping(text, tokens)\n\n start_mapping = {j[0]: i for i, j in enumerate(index_token_mapping) if j}\n end_mapping = {j[-1]: i for i, j in enumerate(index_token_mapping) if j}\n\n input_ids, input_mask, segment_ids = self.tokenizer.sequence_to_ids(tokens)\n\n if not self.is_train:\n triples = []\n\n for triple in row_['label']:\n sub_head_idx = triple[1]\n sub_end_idx = triple[2]\n obj_head_idx = triple[5]\n obj_end_idx = triple[6]\n\n if sub_head_idx in start_mapping and obj_head_idx in start_mapping and sub_end_idx in end_mapping and obj_end_idx in end_mapping:\n sub_head_idx = start_mapping[sub_head_idx]\n obj_head_idx = start_mapping[obj_head_idx]\n\n triples.append((('H', sub_head_idx + 1, end_mapping[sub_end_idx] + 1 + 1),\n ('T', obj_head_idx + 1, end_mapping[obj_end_idx] + 1 + 1),\n self.cat2id[triple[3]]))\n\n feature = {\n 'input_ids': input_ids,\n 'attention_mask': input_mask,\n 'triples': triples,\n 'token_mapping': token_mapping\n }\n\n features.append(feature)\n\n else:\n corres_tag = np.zeros((\n self.tokenizer.max_seq_len,\n self.tokenizer.max_seq_len\n ))\n\n rel_tag = len(self.cat2id) * [0]\n rel_entities = defaultdict(set)\n\n for triple in row_['label']:\n sub_head_idx = triple[1]\n sub_end_idx = triple[2]\n obj_head_idx = triple[5]\n obj_end_idx = triple[6]\n\n # construct relation tag\n rel_tag[self.cat2id[triple[3]]] = 1\n\n if sub_head_idx in start_mapping and obj_head_idx in start_mapping and sub_end_idx in end_mapping and obj_end_idx in end_mapping:\n sub_head_idx = start_mapping[sub_head_idx]\n obj_head_idx = 
start_mapping[obj_head_idx]\n\n corres_tag[sub_head_idx+1][obj_head_idx+1] = 1\n rel_entities[self.cat2id[triple[3]]].add((sub_head_idx, end_mapping[sub_end_idx], obj_head_idx, end_mapping[obj_end_idx]))\n\n for rel, en_ll in rel_entities.items():\n # init\n tags_sub = self.tokenizer.max_seq_len * [self.sublabel2id['O']]\n tags_obj = self.tokenizer.max_seq_len * [self.oblabel2id['O']]\n\n for en in en_ll:\n # get sub and obj head\n sub_head_idx, sub_end_idx, obj_head_idx, obj_end_idx = en\n\n tags_sub[sub_head_idx + 1] = self.sublabel2id['B-H']\n tags_sub[sub_head_idx + 1 + 1: sub_end_idx + 1 + 1] = (sub_end_idx - sub_head_idx) * [self.sublabel2id['I-H']]\n\n tags_obj[obj_head_idx + 1] = self.oblabel2id['B-T']\n tags_obj[obj_head_idx + 1 + 1: obj_end_idx + 1 + 1] = (obj_end_idx - obj_head_idx) * [self.oblabel2id['I-T']]\n\n seq_tag = [tags_sub, tags_obj]\n\n feature = {\n 'input_ids': input_ids,\n 'attention_mask': input_mask,\n 'corres_tags': corres_tag,\n 'seq_tags': seq_tag,\n 'potential_rels': rel,\n 'rel_tags': rel_tag,\n 'token_mapping': token_mapping\n }\n\n features.append(feature)\n\n return features\n\n @property\n def to_device_cols(self):\n if self.is_train:\n return ['input_ids', 'attention_mask', 'corres_tags', 'seq_tags', 'potential_rels', 'rel_tags']\n else:\n return ['input_ids', 'attention_mask']\n","repo_name":"xiangking/ark-nlp","sub_path":"ark_nlp/model/re/prgc_bert/prgc_relation_extraction_dataset.py","file_name":"prgc_relation_extraction_dataset.py","file_ext":"py","file_size_in_byte":6493,"program_lang":"python","lang":"en","doc_type":"code","stars":301,"dataset":"github-code","pt":"81"} +{"seq_id":"16074613571","text":"from utilities.email.EmailDriver import Email\nfrom utilities.email.EmailVariants.EvaluatePosts import EvaluatePosts\nfrom utilities.email.EmailVariants.DailyReport import DailyReport\nfrom utilities.logger.MyLogger import MyLogger\nfrom config import config\nfrom utilities.request.Request import Request\nfrom json.decoder import JSONDecodeError\nfrom utilities.filedriver.Local import Local\n\nclass Daily:\n\n\tdef __init__(self, browser):\n\t\tself.browser = browser\n\n\tdef evaluatePosts(self):\n\t\tEmail(EvaluatePosts()).send()\n\n\tdef dailyReport(self):\n\t\tself.browser.get(\"https://www.instagram.com/\" + config.instagram_username + \"/\")\n\t\tfollowerCount = self.browser.find_element_by_css_selector(\"a[href*='followers'] span\").text\n\t\tfollowingCount = self.browser.find_element_by_css_selector(\"a[href*='following'] span\").text\n\n\t\tRequest().post(\"/report/follower-count\", {\"followerCount\": followerCount, \"followingCount\": followingCount})\n\n\t\tEmail(DailyReport(followerCount, followingCount)).send()\n\n\tdef reportErrors(self):\n\t\tMyLogger().send()\n\t\tMyLogger().empty()\n\n\tdef cleanOutScrapedImages(self):\n\t\tlocal = Local()\n\t\ttry:\n\t\t\t# fetch images that have been disapproved or that have been posted\n\t\t filepaths = Request().get(\"/post/can-be-deleted\").json()\n\t\t for filepath in filepaths:\n\t\t \t# check if the images exist in scraped images and delete them\n\t\t \tlocal.deleteIfExists(filepath)\n\n\t\texcept JSONDecodeError:\n\t\t print(\"no posts to delete\")\n\n\n\n","repo_name":"Omnidividi/insta","sub_path":"daily/Daily.py","file_name":"Daily.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39716937574","text":"from .Debug_BST import TreeNode, PrintTree, list2Tree\n\n\n# BFS with tag\nclass 
Solution:\n def levelOrder(self, root: TreeNode):\n self.res = []\n\n def BFSwithTag(Q):\n while Q:\n (node, level) = Q.pop(0)\n if level > len(self.res) - 1:\n self.res.append([node.val])\n else:\n self.res[level].append(node.val)\n if node.left is not None:\n Q.append((node.left, level + 1))\n if node.right is not None:\n Q.append((node.right, level + 1))\n\n if root is not None:\n queue = [(root, 0)]\n BFSwithTag(queue)\n return self.res\n\n\n\n\n\n","repo_name":"shiyutang/DL-Prep","sub_path":"04_Algorithms/Leetcode/L102. Binary Tree Level Order Traversal.py","file_name":"L102. Binary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"27511743238","text":"from CCW import Point\nfrom CCW import CCW\nimport math\nimport sys\n\n# Calculate the distance between a line ab and a point c\ndef distance(a, b, c):\n xa = a.getX()\n xb = b.getX()\n xc = c.getX()\n ya = a.getY()\n yb = b.getY()\n yc = c.getY()\n return abs((xc - xb) * (yb - ya) - (xb - xa) * (yc - yb)) / \\\n math.sqrt(math.pow(xc - xb, 2) + math.pow(yc - yb, 2))\n\n# Check whether point d is contained on triangle abc\ndef contained(a, b, c, d):\n first = CCW(a, b, d)\n second = CCW(b, c, d)\n third = CCW(c, a, d)\n # Check if d is on a line on the triangle\n if first == 0 or second == 0 or third == 0:\n return True\n # Check if d is inside of the triangle\n if first == second == third:\n return True\n return False\n\n# Print points as desired\ndef ret(a):\n print(a.getX(), a.getY())\n\n# Recursive function for points on top half of hull\ndef top(a, b, points):\n if len(points) == 0:\n return\n furthest = points[0]\n d = distance(a, b, furthest)\n # Find furthest points from line\n for i in points:\n if distance(a, b, i) > d:\n furthest = i\n d = distance(a, b, i)\n TopRight = []\n TopLeft = []\n # Consider and partition points outside of triangle a,b,furthest\n for i in points:\n if not contained(a, b, furthest, i):\n if i.getX() > furthest.getX():\n TopRight.append(i)\n elif i.getX() < furthest.getX():\n TopLeft.append(i)\n # Recursively call on TopLeft and TopRight, printing in middle\n # Such that ordering on vertices will be by increasing x coordinate\n top(a, furthest, TopLeft)\n ret(furthest)\n top(furthest, b, TopRight)\n\n# Recursive function for points on bottom half of hull\ndef bottom(a, b, points):\n if len(points) == 0:\n return\n furthest = points[0]\n d = distance(a, b, furthest)\n # Find furthest points from line\n for i in points:\n if distance(a, b, i) > d:\n furthest = i\n d = distance(a, b, i)\n BottomRight = []\n BottomLeft = []\n # Consider and partition points outside of triangle a,b,furthest\n for i in points:\n if not contained(a, b, furthest, i):\n if i.getX() > furthest.getX():\n BottomRight.append(i)\n elif i.getX() < furthest.getX():\n BottomLeft.append(i)\n # Recursively call on BottomRight and BottomLeft, printing in middle\n # Such that ordering on vertices will be by increasing x coordinate\n bottom(furthest, b, BottomRight)\n ret(furthest)\n bottom(a, furthest, BottomLeft)\n\ndef quickhull(Points):\n # Calculate mins and maxes\n Xmax = Xmin = Points[0]\n Ymin = Ymax = Point(math.inf, math.inf)\n for i in Points:\n if i.getX() < Xmin.getX():\n Xmin = i\n if i.getX() > Xmax.getX():\n Xmax = i\n Top = []\n Bottom = []\n # Partition top and bottom points\n for i in Points:\n if CCW(Xmin, Xmax, i) == -1:\n Top.append(i)\n elif CCW(Xmin, Xmax, i) == 1:\n Bottom.append(i)\n 
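# -inf sentinel so any real point-to-line distance beats the initial value\n    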
Ydis = -math.inf\n # Calculate furthest points above and below from line Xmin,Xmax\n for i in Top:\n if distance(Xmin, Xmax, i) > Ydis:\n Ymax = i\n Ydis = distance(Xmin, Xmax, i)\n Ydis = -math.inf\n for i in Bottom:\n if distance(Xmin, Xmax, i) > Ydis:\n Ymin = i\n Ydis = distance(Xmin, Xmax, i)\n TopRight = []\n TopLeft = []\n BottomRight = []\n BottomLeft = []\n # Only consider those points outside the quadrilateral\n # Partition Top and Bottom to get four arrays for recursive function calling\n for i in Top:\n if Ymax.getX() != math.inf and not contained(Xmin, Xmax, Ymax, i):\n if i.getX() > Ymax.getX():\n TopRight.append(i)\n elif i.getX() < Ymax.getX():\n TopLeft.append(i)\n for i in Bottom:\n if Ymin.getX() != math.inf and not contained(Xmin, Xmax, Ymin, i):\n if i.getX() > Ymin.getX():\n BottomRight.append(i)\n elif i.getX() < Ymin.getX():\n BottomLeft.append(i)\n # Check all four corners, in proper order, for printing\n # With increasing x coordinates\n ret(Xmin)\n top(Xmin, Ymax, TopLeft)\n if Ymax.getX() != math.inf:\n ret(Ymax)\n top(Ymax, Xmax, TopRight)\n ret(Xmax)\n bottom(Ymin, Xmax, BottomRight)\n if Ymin.getX() != math.inf:\n ret(Ymin)\n bottom(Xmin, Ymin, BottomLeft)\n\ndef main():\n arr = []\n for l in sys.stdin:\n arr += l.strip().split()\n first_int = int(arr[0])\n curr = 1\n for i in range(0, first_int):\n Points = []\n second = int(arr[curr])\n curr = curr + 1\n for j in range(curr, curr + second * 2, 2):\n point = Point(arr[j], arr[j + 1])\n Points.append(point)\n quickhull(Points)\n print(\"\\n\")\n curr = curr + second * 2\n\nif __name__ == \"__main__\":\n main()","repo_name":"Tykay99/QuickHull_CompGeo","sub_path":"QuickHullFinal.py","file_name":"QuickHullFinal.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34454401805","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom urllib import parse\nimport datetime\nimport copy\nimport os\n\ndef num_of_posts(soup):\n total_post_num = soup.find('span', {'class':'title_num'})\n result_page_num = 0\n\n if total_post_num:\n total_post_num = int(total_post_num.text.split(' ')[-1].replace(',','')[:-1])\n\n if total_post_num < 10:\n result_page_num = 1\n else:\n if total_post_num%10 == 0:\n result_page_num = int(total_post_num/10)\n else:\n result_page_num = int(total_post_num/10) + 1\n else:\n total_post_num = 0\n result_page_num = 0\n\n return total_post_num, result_page_num\n\ndef generate_url(query, mode, page_num = \"\"):\n\n day = \"2\"\n week = \"3\"\n month = \"4\"\n halfYear = \"5\"\n\n word = query.replace(query, parse.quote(query))\n url = \"https://search.naver.com/search.naver?where=post&query=\"+word+ \\\n \"&ie=utf8&st=sim&sm=tab_opt&date_from=20030520&date_to=20170724&date_option=\"+mode+ \\\n \"&srchby=title&dup_remove=1&post_blogurl=&post_blogurl_without=&nso=so%3Ar%2Ca%3Aall%2Cp%3A1d&mson=0\"\n\n if page_num:\n url += \"&start=\" + page_num\n return url\n\ndef get_buzz(query, soup):\n\n post_num, page_num = num_of_posts(soup)\n\n return [query, post_num, page_num, datetime.date.today()]\n\ndef source_extractor(url, driver):\n\n driver.get(url)\n s = (driver.page_source).encode('utf-8')\n soup = BeautifulSoup(s, \"lxml\", from_encoding='utf-8')\n\n return soup\n\ndef href_extractor(soup):\n hrefs = []\n for post in soup.find_all('a', attrs={'class':'sh_blog_title _sp_each_url _sp_each_title'}):\n hrefs.append(post['href'])\n\n return hrefs\n\ndef naver_blog_scraper(copy_list, 
driver):\n current_url = copy_list[1]\n current_url = current_url.replace('?Redirect=Log&logNo=', '/')\n current_url = current_url.replace('http://', 'http://m.')\n try:\n soup = source_extractor(current_url, driver)\n contents = ''\n contents_holder = soup.find_all('div', class_='se_component_wrap sect_dsc __se_component_area')\n contents_holder += soup.find_all('div', class_='post_ct ')\n contents_holder += soup.find_all('div', class_='post_ct se3_view ')\n if contents_holder:\n for paragraph_soup in contents_holder:\n sentences_soup = paragraph_soup.find_all('div')\n sentences_soup += paragraph_soup.find_all('p')\n sentences_soup += paragraph_soup.find_all('span')\n if sentences_soup:\n for text_soup in sentences_soup:\n if text_soup.text:\n temp_line = ' '.join(text_soup.text.split())\n if temp_line not in contents:\n contents += ' ' + temp_line + ' '\n except:\n print(\"Loading the following page has failed : \" + current_url)\n\n if contents:\n copy_list.append(contents)\n copy_list[1] = current_url\n else:\n print(\"No Content retrieved from the following URL : \" + current_url)\n\ndef content_scraper(list_of_posts, driver):\n #driver.set_page_load_timeout(50)\n copy_list = copy.deepcopy(list_of_posts)\n for i in range(len(copy_list)):\n for j in range(len(copy_list[i])):\n if 'naver' in copy_list[i][j][1]:\n naver_blog_scraper(copy_list[i][j], driver)\n\n return copy_list\n\ndef all_post_list(queries, mode, driver):\n buzz_per_query = []\n post_list = []\n\n for query in queries:\n current_url = generate_url(query, mode)\n soup = source_extractor(current_url, driver)\n post_num, page_num = num_of_posts(soup)\n posts_per_query = []\n\n for i in range(page_num):\n #naver search result url moves up by 10 when you move between pages (i.e. first page = 1, second page = 11, third = 31...)\n current_page = str(10*i + 1)\n current_page_url = generate_url(query, mode, current_page)\n soup = source_extractor(current_page_url, driver)\n each_post = soup.find_all('li', attrs={'class':'sh_blog_top'})\n\n for post in each_post:\n href = href_extractor(post)\n posts_per_query.append([query, href[0]])\n post_list.append(posts_per_query)\n buzz_per_query.append([query,mode,post_num])\n\n return buzz_per_query, post_list\n\ndef run_scraper(query_list, mode, driver):\n buzz_per_query, post_list = all_post_list(query_list, mode, driver)\n\n post_list = content_scraper(post_list, driver)\n\n return buzz_per_query, post_list\n\ndef main():\n path = os.getcwd() + \"\\\\phantomjs\\\\bin\\\\phantomjs\"\n driver = webdriver.PhantomJS(path)\n test = run_scraper([\"리니지m\"], \"2\", driver)\n print(test)\nmain()\n","repo_name":"kyungSong/placeholder","sub_path":"crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12356823930","text":"import tkinter as tk\nfrom tkinter import messagebox\n\n# Global Variables\nnum_of_turns = 0\nturn_of_X = True\n\n\ndef play(buttons):\n \"\"\"Player will click on valid button and the mark will appear on the button.\"\"\"\n global turn_of_X, num_of_turns\n if buttons[\"text\"] == \" \" and turn_of_X == True:\n buttons[\"text\"] = \"X\"\n turn_of_X = False\n check_if_game_over()\n num_of_turns += 1\n\n elif buttons[\"text\"] == \" \" and turn_of_X == False:\n buttons[\"text\"] = \"O\"\n turn_of_X = True\n check_if_game_over()\n num_of_turns += 1\n\n else:\n messagebox.showwarning(title=\"Warning!\", message=\"Clicked already\", )\n\n\ndef 
check_if_game_over():\n if (button1['text'] == 'X' and button2['text'] == 'X' and button3['text'] == 'X' or\n button4['text'] == 'X' and button5['text'] == 'X' and button6['text'] == 'X' or\n button7['text'] == 'X' and button8['text'] == 'X' and button9['text'] == 'X' or\n button1['text'] == 'X' and button5['text'] == 'X' and button9['text'] == 'X' or\n button3['text'] == 'X' and button5['text'] == 'X' and button7['text'] == 'X' or\n button1['text'] == 'X' and button4['text'] == 'X' and button7['text'] == 'X' or\n button2['text'] == 'X' and button5['text'] == 'X' and button8['text'] == 'X' or\n button3['text'] == 'X' and button6['text'] == 'X' and button9['text'] == 'X'):\n quit_application(\"X\")\n\n elif num_of_turns == 8:\n quit_application(\"\")\n\n elif (button1['text'] == 'O' and button2['text'] == 'O' and button3['text'] == 'O' or\n button4['text'] == 'O' and button5['text'] == 'O' and button6['text'] == 'O' or\n button7['text'] == 'O' and button8['text'] == 'O' and button9['text'] == 'O' or\n button1['text'] == 'O' and button5['text'] == 'O' and button9['text'] == 'O' or\n button3['text'] == 'O' and button5['text'] == 'O' and button7['text'] == 'O' or\n button1['text'] == 'O' and button4['text'] == 'O' and button7['text'] == 'O' or\n button2['text'] == 'O' and button5['text'] == 'O' and button8['text'] == 'O' or\n button3['text'] == 'O' and button6['text'] == 'O' and button9['text'] == 'O'):\n quit_application(\"O\")\n \"\"\"we are checking if the game is over or not by checking the win and tie conditions\"\"\"\n\n\ndef clear_button():\n \"\"\"Clearing the sign from the buttons.\"\"\"\n button1[\"text\"] = \" \"\n button2[\"text\"] = \" \"\n button3[\"text\"] = \" \"\n button4[\"text\"] = \" \"\n button5[\"text\"] = \" \"\n button6[\"text\"] = \" \"\n button7[\"text\"] = \" \"\n button8[\"text\"] = \" \"\n button9[\"text\"] = \" \"\n\n\n# def disableButton():\n# button1.configure(state='disabled')\n# button2.configure(state=DISABLED)\n# button3.configure(state=DISABLED)\n# button4.configure(state=DISABLED)\n# button5.configure(state=DISABLED)\n# button6.configure(state=DISABLED)\n# button7.configure(state=DISABLED)\n# button8.configure(state=DISABLED)\n# button9.configure(state=DISABLED)\n\n\ndef restart_game():\n global num_of_turns\n global turn_of_X\n \"\"\"To restart the game we just need to clear sign from buttons and set number of turns to zero.\"\"\"\n num_of_turns = 0\n turn_of_X = True\n clear_button()\n # disableButton()\n\n\ndef quit_application(winner):\n \"\"\"Ask user to restart or quit the game.\"\"\"\n if winner == \"X\" or winner == \"O\":\n message_box = tk.messagebox.askquestion(\"Tic Tac Toe\", \"{} has won the game.\\n\"\n \"Do want to quit the application\".format(\n winner),\n icon='warning')\n else:\n message_box = tk.messagebox.askquestion(\"Tic Tac Toe\", \"It is a Draw!\\nDo you want to quit the application\",\n icon='warning')\n if message_box == 'yes':\n root.destroy()\n else:\n tk.messagebox.showinfo('Tic Tac Toe', \"Restarting the game...\")\n restart_game()\n\n\n# From here I will define the GUI code\nroot = tk.Tk()\nroot.title(\"Tic Tac Toe\")\nmin_width, min_height = 750, 700\nroot.minsize(min_width, min_height)\n# given line of code will make our whole window transparent. 
I have commented it because it was not very pleasing to see\n# root.wm_attributes(\"-alpha\", 0.75)\n\nbackground_image = tk.PhotoImage(file=\"./game.png\")\n\nbackground_label = tk.Label(root, image=background_image)\nbackground_label.place(x=0, y=0, relwidth=1, relheight=1)\n\nnotice_label = tk.Label(\n root, text=\"Welcome to a tic tac toe game \\n \\t by Pritesh Soni.\", font=(\"times new roman\", 20))\nnotice_label.place(relx=0.2, rely=0.1, relwidth=0.6, relheight=0.1)\n\n# These are buttons with 0.19*0.19 relsize so that there is a thin line between them, makes it look pretty.\n\nbutton1 = tk.Button(root, text=\" \", command=lambda: play(\n button1), font=(\"times new roman\", 22))\nbutton1.place(relx=0.2, rely=0.3, relwidth=0.19, relheight=0.19)\nbutton2 = tk.Button(root, text=\" \", command=lambda: play(\n button2), font=(\"times new roman\", 22))\nbutton2.place(relx=0.4, rely=0.3, relwidth=0.19, relheight=0.19)\nbutton3 = tk.Button(root, text=\" \", command=lambda: play(\n button3), font=(\"times new roman\", 22))\nbutton3.place(relx=0.6, rely=0.3, relwidth=0.19, relheight=0.19)\nbutton4 = tk.Button(root, text=\" \", command=lambda: play(\n button4), font=(\"times new roman\", 22))\nbutton4.place(relx=0.2, rely=0.5, relwidth=0.19, relheight=0.19)\nbutton5 = tk.Button(root, text=\" \", command=lambda: play(\n button5), font=(\"times new roman\", 22))\nbutton5.place(relx=0.4, rely=0.5, relwidth=0.19, relheight=0.19)\nbutton6 = tk.Button(root, text=\" \", command=lambda: play(\n button6), font=(\"times new roman\", 22))\nbutton6.place(relx=0.6, rely=0.5, relwidth=0.19, relheight=0.19)\nbutton7 = tk.Button(root, text=\" \", command=lambda: play(\n button7), font=(\"times new roman\", 22))\nbutton7.place(relx=0.2, rely=0.7, relwidth=0.19, relheight=0.19)\nbutton8 = tk.Button(root, text=\" \", command=lambda: play(\n button8), font=(\"times new roman\", 22))\nbutton8.place(relx=0.4, rely=0.7, relwidth=0.19, relheight=0.19)\nbutton9 = tk.Button(root, text=\" \", command=lambda: play(\n button9), font=(\"times new roman\", 22))\n\nbutton1 = tk.Button(root, text=\" \", command=lambda: play(\n button1), font=(\"times new roman\", 22))\nbutton1.place(relx=0.2, rely=0.3, relwidth=0.19, relheight=0.19)\nbutton2 = tk.Button(root, text=\" \", command=lambda: play(\n button2), font=(\"times new roman\", 22))\nbutton2.place(relx=0.4, rely=0.3, relwidth=0.19, relheight=0.19)\nbutton3 = tk.Button(root, text=\" \", command=lambda: play(\n button3), font=(\"times new roman\", 22))\nbutton3.place(relx=0.6, rely=0.3, relwidth=0.19, relheight=0.19)\nbutton4 = tk.Button(root, text=\" \", command=lambda: play(\n button4), font=(\"times new roman\", 22))\nbutton4.place(relx=0.2, rely=0.5, relwidth=0.19, relheight=0.19)\nbutton5 = tk.Button(root, text=\" \", command=lambda: play(\n button5), font=(\"times new roman\", 22))\nbutton5.place(relx=0.4, rely=0.5, relwidth=0.19, relheight=0.19)\nbutton6 = tk.Button(root, text=\" \", command=lambda: play(\n button6), font=(\"times new roman\", 22))\nbutton6.place(relx=0.6, rely=0.5, relwidth=0.19, relheight=0.19)\nbutton7 = tk.Button(root, text=\" \", command=lambda: play(\n button7), font=(\"times new roman\", 22))\nbutton7.place(relx=0.2, rely=0.7, relwidth=0.19, relheight=0.19)\nbutton8 = tk.Button(root, text=\" \", command=lambda: play(\n button8), font=(\"times new roman\", 22))\nbutton8.place(relx=0.4, rely=0.7, relwidth=0.19, relheight=0.19)\nbutton9 = tk.Button(root, text=\" \", command=lambda: play(\n button9), font=(\"times new roman\", 
22))\n\nbutton9.place(relx=0.6, rely=0.7, relwidth=0.19, relheight=0.19)\n\nclear_screen = tk.Button(root, text=\"Clear Screen\", font=(\n \"times new roman\", 22), command=restart_game)\nclear_screen.place(relx=0.38, rely=0.21, relwidth=0.24, relheight=0.08)\n\nroot.mainloop()\n","repo_name":"PriteshSoni221/Tic_tac_toe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30908567548","text":"# -*- coding: UTF-8 -*-\n\nfrom utils import FakeStream\nfrom s3_sync import utils\nimport mock\nimport unittest\n\n\nclass TestUtilsFunctions(unittest.TestCase):\n def test_s3_headers_conversion(self):\n input_hdrs = {'x-object-meta-foo': 'Foo',\n 'x-object-meta-Bar': 'Bar',\n 'X-Object-Meta-upper': '1',\n 'X-ObJeCT-Meta-CraZy': 'CrAzY',\n 'X-Object-Manifest': 'container/key/123415/prefix',\n 'Content-Type': 'application/testing'}\n out = utils.convert_to_s3_headers(input_hdrs)\n expected = dict([(key[len('x-object-meta-'):].lower(), value) for\n key, value in input_hdrs.items() if\n key.lower().startswith(utils.SWIFT_USER_META_PREFIX)])\n expected[utils.MANIFEST_HEADER] = input_hdrs['X-Object-Manifest']\n self.assertEqual(set(expected.keys()), set(out.keys()))\n for key in out.keys():\n self.assertEqual(expected[key], out[key])\n\n def test_get_slo_etag(self):\n sample_manifest = [{'hash': 'abcdef'}, {'hash': 'fedcba'}]\n # We expect the md5 sum of the concatenated strings (converted to hex\n # bytes) followed by the number of parts (segments)\n expected_tag = 'ce7989f0e2f1f3e4fdd2a01dda0844ae-2'\n self.assertEqual(expected_tag, utils.get_slo_etag(sample_manifest))\n\n\nclass FakeSwift(object):\n def __init__(self):\n self.size = 1024\n self.status = 200\n\n def get_object(self, account, container, key, headers={}):\n self.fake_stream = FakeStream(self.size)\n return (self.status,\n {'Content-Length': self.size},\n self.fake_stream)\n\n\nclass TestFileWrapper(unittest.TestCase):\n def setUp(self):\n self.mock_swift = FakeSwift()\n\n def test_open(self):\n wrapper = utils.FileWrapper(self.mock_swift,\n 'account',\n 'container',\n 'key')\n self.assertEqual(1024, len(wrapper))\n\n def test_seek(self):\n wrapper = utils.FileWrapper(self.mock_swift,\n 'account',\n 'container',\n 'key')\n wrapper.read(256)\n wrapper.seek(0)\n self.assertEqual(0, self.mock_swift.fake_stream.current_pos)\n\n\nclass TestSLOFileWrapper(unittest.TestCase):\n def setUp(self):\n self.manifest = [\n {'name': '/foo/part1',\n 'bytes': 500},\n {'name': '/foo/part2',\n 'bytes': 1000}\n ]\n self.swift = mock.Mock()\n\n def test_slo_length(self):\n slo = utils.SLOFileWrapper(self.swift, 'account', self.manifest,\n {'etag': 'deadbeef'})\n self.assertEqual(1500, len(slo))\n\n def test_slo_headers(self):\n slo = utils.SLOFileWrapper(self.swift, 'account', self.manifest,\n {'etag': 'deadbeef'})\n\n self.assertEqual(1500, len(slo))\n self.assertEqual(\n 'deadbeef', slo.get_s3_headers()['swift-slo-etag'])\n\n def test_seek_after_read(self):\n fake_segment = FakeStream(content='A' * 500)\n self.assertEqual(False, fake_segment.closed)\n\n def get_object(account, container, key, headers={}):\n if account != 'account':\n raise RuntimeError('unknown account')\n if container != 'foo':\n raise RuntimeError('unknown container')\n if key == 'part1':\n return (200, {'Content-Length': 500}, fake_segment)\n raise RuntimeError('unknown key')\n\n self.swift.get_object.side_effect = get_object\n slo = 
utils.SLOFileWrapper(self.swift, 'account', self.manifest,\n                                   {'etag': 'deadbeef'})\n        data = slo.read()\n        slo.seek(0)\n        self.assertEqual(True, fake_segment.closed)\n        self.assertEqual('A' * 500, data)\n        self.swift.get_object.assert_called_once_with(\n            'account', 'foo', 'part1', headers={})\n\n    def test_read_manifest(self):\n        part1_content = FakeStream(content='A' * 500)\n        part2_content = FakeStream(content='B' * 1000)\n\n        def get_object(account, container, key, headers={}):\n            if account != 'account':\n                raise RuntimeError('unknown account')\n            if container != 'foo':\n                raise RuntimeError('unknown container')\n            if key == 'part1':\n                return (200, {'Content-Length': 500}, part1_content)\n            if key == 'part2':\n                return (200, {'Content-Length': 1000}, part2_content)\n            raise RuntimeError('unknown key')\n\n        self.swift.get_object.side_effect = get_object\n        slo = utils.SLOFileWrapper(self.swift, 'account', self.manifest,\n                                   {'etag': 'deadbeef'})\n        content = ''\n        while True:\n            data = slo.read()\n            content += data\n            if not data:\n                break\n        self.assertEqual(1500, len(content))\n        self.assertEqual('A' * 500, content[0:500])\n        self.assertEqual('B' * 1000, content[500:1500])\n\n        # assert_has_calls actually verifies the calls (has_calls would\n        # silently create a child mock and never fail)\n        self.swift.get_object.assert_has_calls([\n            mock.call('account', 'foo', 'part1', headers={}),\n            mock.call('account', 'foo', 'part2', headers={})])\n        self.assertEqual(True, part1_content.closed)\n        self.assertEqual(True, part2_content.closed)\n","repo_name":"caiobrentano/swift-s3-sync","sub_path":"test/unit/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19672060950","text":"from common.input_loader import load_input\n\n\ndata = load_input(2)\n\n\ndef solve_1():\n    x, y, c = 0, 0, {'up': (0, -1), 'down': (0, 1), 'forward': (1, 0)}\n    for instr, value in [(i, int(v)) for i, v in [l.split(' ') for l in data]]:\n        x += value * c[instr][0]\n        y += value * c[instr][1]\n    return x * y\n\n\ndef solve_2():\n    x, y, a, c = 0, 0, 0, {'up': (0, 0, -1), 'down': (0, 0, 1), 'forward': (1, 1, 0)}\n    for instr, value in [(i, int(v)) for i, v in [l.split(' ') for l in data]]:\n        x += value * c[instr][0]\n        y += value * c[instr][1] * a\n        a += value * c[instr][2]\n    return x * y\n\n\nprint(solve_1())\nprint(solve_2())\n","repo_name":"johnBuffer/AdventOfCode-2021","sub_path":"02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"5901081997","text":"import random\nfrom typing import List\n\n\ndef merge_lists(inputs: List[List[int]]) -> List[int]:\n    \"\"\"\n    Merges an arbitrary number of sorted lists of integers\n    into a single sorted list of integers.\n    Iterates over the input list of lists.\n    On each iteration, selects pairs (a, b) of lists to merge,\n    copying the merged list over list a.\n    The outer loop doubles the value of leap starting at 1\n    and ending where leap equals the number of input lists.\n    The inner loop then finds pairs (i, i + leap), where\n    i ranges from 0 to num_lists, incrementing by 2 * leap.\n    For k = 8 input lists, the pairs of list indices used for merging\n    across all outer loop iterations would be\n    (0, 1) -> 0, (2, 3) -> 2, (4, 5) -> 4, (6, 7) -> 6\n    (0, 2) -> 0, (4, 6) -> 4\n    (0, 4) -> 0\n\n    :param inputs: list of sorted lists of integers\n    :return: sorted list of integers\n    \"\"\"\n    num_lists = len(inputs)\n    leap = 1\n    while leap < num_lists:\n        i = 0\n        while i < num_lists:\n            if i + leap < num_lists:\n                
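# merge the pair (i, i + leap) in place: list i receives the merged\n                # contents of lists i and i + leap\n                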
inputs[i] = merge_two_lists(inputs[i], inputs[i + leap])\n i += leap * 2\n leap *= 2\n return inputs[0]\n\n\ndef merge_two_lists(list_a: List[int], list_b: List[int]) -> List[int]:\n \"\"\"\n Helper function to merge elements of list_a and list_b into\n a single list, which it returns.\n\n :param list_a: list of integers to merge with those in list_b into a single list\n :param list_b: list of integers to merge with those in list_a into a single list\n :return:\n \"\"\"\n output = []\n while len(list_a) or len(list_b):\n if len(list_b) and (not len(list_a) or list_b[0] < list_a[0]):\n output.append(list_b[0])\n list_b.pop(0)\n else:\n output.append(list_a[0])\n list_a.pop(0)\n return output\n\n\nif __name__ == '__main__':\n input_lists = []\n input_sizes = [5, 4, 3, 6, 7, 4, 5, 2]\n random.seed(1)\n for input_size in input_sizes:\n new_list = random.sample(range(0, 100), input_size)\n new_list.sort()\n input_lists.append(new_list)\n print('Inputs:')\n for input_list in input_lists:\n print(input_list)\n output_list = merge_lists(input_lists)\n print()\n print('Outputs:')\n print(output_list)\n","repo_name":"PhoenixTAN/CS-591-Parallel-Computing","sub_path":"Code/merge-k-lists/merge_lists_pairs.py","file_name":"merge_lists_pairs.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34646158372","text":"import os\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom body_model import SMPL_JOINTS, KEYPT_VERTS, smpl_to_openpose\nfrom geometry.rotation import batch_rodrigues, rotation_matrix_to_angle_axis\nfrom util.logger import Logger\nfrom util.tensor import (\n detach_all,\n scatter_intervals,\n select_intervals,\n get_scatter_mask,\n)\n\nfrom .base_scene import BaseSceneModel\nfrom .helpers import compute_world2prior, estimate_floor_planes\n\n\nJ_BODY = len(SMPL_JOINTS) - 1 # no root\nCONTACT_ORDERING = [\n \"hips\",\n \"leftLeg\",\n \"rightLeg\",\n \"leftFoot\",\n \"rightFoot\",\n \"leftToeBase\",\n \"rightToeBase\",\n \"leftHand\",\n \"rightHand\",\n]\nCONTACT_INDS = [SMPL_JOINTS[jname] for jname in CONTACT_ORDERING]\nCONTACT_THRESH = 0.5\n\n\nclass MovingSceneModel(BaseSceneModel):\n \"\"\"\n Scene model of moving people in a shared global reference frame\n\n Parameters:\n batch_size: number of sequences to optimize\n seq_len: length of the sequences\n pose_prior: VPoser model\n motion_prior: humor model\n init_motion_prior: dict of GMM params to use for prior on initial motion state\n shared_floor: (default True) if true, sequences are in the same reference frame\n fit_gender: (optional) gender of SMPL model\n \"\"\"\n\n def __init__(\n self,\n batch_size,\n seq_len,\n body_model,\n pose_prior,\n motion_prior,\n init_motion_prior=None,\n fit_gender=\"neutral\",\n use_init=False,\n opt_cams=False,\n opt_scale=True,\n cam_graph=False,\n est_floor=True,\n floor_type=\"shared\",\n async_tracks=True,\n **kwargs,\n ):\n super().__init__(\n batch_size,\n seq_len,\n body_model,\n pose_prior,\n fit_gender=fit_gender,\n use_init=use_init,\n opt_cams=opt_cams,\n opt_scale=opt_scale,\n cam_graph=cam_graph,\n )\n assert motion_prior is not None\n assert motion_prior.model_data_config in [\n \"smpl+joints\",\n \"smpl+joints+contacts\",\n ], \"Only smpl+joints motion prior configuration is supported!\"\n\n self.motion_prior = motion_prior\n self.init_motion_prior = init_motion_prior\n\n # need latent dynamics sequence as well\n self.latent_motion_dim = 
self.motion_prior.latent_size\n self.cond_prior = self.motion_prior.use_conditional_prior\n\n # the frame chosen to use for the initial state (first frame by default)\n self.async_tracks = async_tracks\n self.register_buffer(\"track_start\", torch.zeros(self.batch_size))\n self.register_buffer(\"track_end\", torch.ones(self.batch_size) * self.seq_len)\n\n self.shared_floor = floor_type == \"shared\"\n self.group_floor = floor_type == \"group\"\n self.floor_type = floor_type\n print(\"FLOOR TYPE\", floor_type)\n self.est_floor = est_floor\n\n @property\n def is_motion_active(self):\n return hasattr(self.params, \"latent_motion\")\n\n def initialize(self, obs_data, cam_data, param_dict, data_fps):\n \"\"\"\n we need to also optimize for floor and world scale\n \"\"\"\n Logger.log(\"Initializing moving scene model with observed data\")\n\n self.params.set_cameras(\n cam_data,\n opt_scale=self.opt_scale,\n opt_cams=self.opt_cams,\n opt_focal=self.opt_cams,\n **param_dict,\n )\n self.init_floor(obs_data, param_dict)\n self.init_first_state(obs_data, param_dict, data_fps)\n\n def init_floor(self, obs_data, param_dict):\n if self.est_floor or self.group_floor:\n with torch.no_grad():\n smpl_preds = self.pred_smpl(\n param_dict[\"trans\"],\n param_dict[\"root_orient\"],\n self.latent2pose(param_dict[\"latent_pose\"]),\n param_dict[\"betas\"],\n )\n floor_plane, floor_idcs = estimate_floor_planes(\n smpl_preds[\"joints3d\"].detach(),\n obs_data[\"vis_mask\"] > 0,\n group=self.group_floor,\n flatten=self.shared_floor,\n )\n if self.group_floor and not self.est_floor:\n # don't use the estimated floor as initial plane\n floor_plane = obs_data[\"floor_plane\"][: len(floor_plane)]\n else: # fixed shared or separate floors\n num_floors = 1 if self.shared_floor else self.batch_size\n floor_plane = obs_data[\"floor_plane\"][:num_floors]\n if self.shared_floor:\n floor_idcs = torch.zeros(\n self.batch_size, dtype=torch.long, device=floor_plane.device\n )\n else:\n floor_idcs = torch.arange(\n self.batch_size, dtype=torch.long, device=floor_plane.device\n )\n\n Logger.log(f\"ESTIMATED FLOORS: {str(floor_plane.detach().cpu())}\")\n Logger.log(f\"FLOOR IDCS: {str(floor_idcs.detach().cpu())}\")\n self.params.set_param(\"floor_plane\", floor_plane.float().detach())\n self.params.set_param(\n \"floor_idcs\", floor_idcs.long().detach(), requires_grad=False\n )\n\n def init_first_state(self, obs_data, param_dict, data_fps):\n \"\"\"\n initialize the latent motion and first state of each track\n using per-frame trajectories of trans, rot, latent_pose, betas\n and observed data\n \"\"\"\n B, T = self.batch_size, self.seq_len\n param_dict = detach_all(param_dict)\n\n # select the valid segments of each track; pad the shorter tracks with final element\n if self.async_tracks and \"track_interval\" in obs_data:\n print(\"asynchronous tracks\")\n interval = obs_data[\"track_interval\"] # (B, 2)\n start, end = interval[:, 0], interval[:, 1]\n self.track_start, self.track_end = start, end\n param_dict = select_dict_segments(\n param_dict, start, end, names=[\"trans\", \"root_orient\", \"latent_pose\"]\n )\n\n trans = param_dict[\"trans\"] # (B, T, 3)\n root_orient = param_dict[\"root_orient\"] # (B, T, 3)\n latent_pose = param_dict[\"latent_pose\"] # (B, T, D)\n betas = param_dict[\"betas\"] # (B, b)\n\n Logger.log(f\"SEL TRACKS {trans.shape}, {root_orient.shape}\")\n\n # save each track's first appearance\n self.params.set_param(\"trans\", trans[:, :1])\n Logger.log(f\"INITIAL TRANS {trans[:, :1].detach().cpu()}\")\n 
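# Only the first frame of each track is stored as an optimizable\n        # parameter; the rest of the trajectory is reconstructed by rolling\n        # the motion prior forward from this initial state.\n        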
self.params.set_param(\"root_orient\", root_orient[:, :1])\n self.params.set_param(\"latent_pose\", latent_pose[:, :1])\n self.params.set_param(\"betas\", betas)\n\n # pass the current pose estimates through the motion prior\n self.data_fps = data_fps\n body_pose = self.latent2pose(latent_pose)\n init_latent = self.infer_latent_motion(\n trans, root_orient, body_pose, betas, data_fps\n ).detach() # (B, T-1, D)\n self.params.set_param(\"latent_motion\", init_latent)\n\n # estimate velocities in prior frame and save initial state\n trans_vel, joints_vel, root_orient_vel = self.estimate_prior_velocities(\n trans, root_orient, body_pose, betas, data_fps\n )\n self.params.set_param(\"trans_vel\", trans_vel[:, :1].detach())\n self.params.set_param(\"joints_vel\", joints_vel[:, :1].detach())\n self.params.set_param(\"root_orient_vel\", root_orient_vel[:, :1].detach())\n\n def get_optim_result(self, num_steps=-1):\n res = super().get_optim_result()\n if not self.is_motion_active:\n return res\n\n num_steps = self.seq_len if num_steps < 0 else num_steps\n prior_res, world_res = self.rollout_latent_motion(\n self.params.latent_motion[:, : num_steps - 1]\n )\n world_res = self.synchronize_preds(world_res, num_steps)\n res[\"world\"].update(world_res)\n\n prior_res = self.synchronize_preds(prior_res, num_steps)\n res[\"prior\"] = prior_res\n return res\n\n def estimate_prior_velocities(self, trans, root_orient, body_pose, betas, data_fps):\n \"\"\"\n compute velocities of trajectory in prior frame (not world frame)\n Transforms the first of each track into the prior frame\n :param trans (B, T, 3)\n :param root_orient (B, T, 3)\n :param body_pose (B, T, Dp)\n :param betas (B, Db)\n \"\"\"\n with torch.no_grad():\n self.update_world2prior(trans, root_orient, body_pose, betas)\n trans, root_orient = self.apply_world2prior(\n trans, root_orient, body_pose, betas\n )\n smpl_results = self.pred_smpl(trans, root_orient, body_pose, betas)\n return estimate_velocities(\n trans, root_orient, smpl_results[\"joints3d\"], data_fps\n )\n\n def update_world2prior(self, trans, root_orient, body_pose, betas):\n B = trans.shape[0]\n smpl_data = self.pred_smpl(\n trans[:, :1], root_orient[:, :1], body_pose[:, :1], betas\n )\n floor_plane = self.params.floor_plane[self.params.floor_idcs]\n R, t, height = compute_world2prior(\n floor_plane,\n trans[:, 0],\n root_orient[:, 0],\n smpl_data[\"joints3d\"][:, 0, 0],\n )\n self.world2prior_R = R # (B, 3, 3)\n self.world2prior_t = t # (B, 3)\n self.world2prior_root_height = height # (B)\n\n def apply_world2prior(self, trans, root_orient, body_pose, betas, inverse=False):\n \"\"\"\n Applies the world2prior tranformation to trans, root_orient\n If T=1, this function assumes they are at key_frame_idx,\n which we need to compute the offset from the origin\n :param trans (B, T, 3)\n :param root_orient (B, T, 3)\n :param body_pose (B, T, J, 3)\n :param betas (B, b)\n :param inverse (bool) optional, default False\n \"\"\"\n B, T, _ = trans.size()\n # (B, 3, 3), (B, 3), (B)\n R, t, root_height = (\n self.world2prior_R,\n self.world2prior_t,\n self.world2prior_root_height,\n )\n R_time = R.unsqueeze(1).expand((B, T, 3, 3))\n t_time = t.unsqueeze(1).expand((B, T, 3))\n root_orient_mat = batch_rodrigues(root_orient.reshape(-1, 3)).reshape(\n B, T, 3, 3\n )\n\n if inverse:\n R_time = R_time.transpose(-1, -2)\n\n root_orient_mat = torch.matmul(R_time, root_orient_mat)\n root_orient = rotation_matrix_to_angle_axis(\n root_orient_mat.reshape(B * T, 3, 3)\n ).reshape(B, T, 3)\n\n if inverse:\n 
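# inverse mapping: from the prior (canonical) frame back to the world frame\n            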
# transform so first frame is at origin\n            trans = trans - trans[:, :1, :]\n\n            # rotate to world frame\n            trans = torch.matmul(R_time, trans[..., None])[..., 0]\n            # translate to world frame\n            trans = trans - t_time\n\n            return trans, root_orient\n\n        # first transform so the trans of key frame is at origin\n        trans = trans + t_time\n        # then rotate to canonical frame\n        trans = torch.matmul(R_time, trans[..., None])[..., 0]\n        # compute the root height after transforming\n        cur_smpl_data = self.pred_smpl(trans, root_orient, body_pose, betas)\n        cur_root_height = cur_smpl_data[\"joints3d\"][:, 0, 0, 2:3]\n        # then apply floor offset so the root joint is at root_height\n        height_diff = root_height - cur_root_height\n        trans_offset = torch.cat(\n            [torch.zeros((B, 2)).to(height_diff), height_diff], axis=1\n        )\n        trans = trans + trans_offset.reshape((B, 1, 3))\n\n        return trans, root_orient\n\n    def convert_prior_rot_inputs(self, root_orient, body_pose):\n        # convert rots\n        # body pose and root orient are both in aa\n        B, T = root_orient.shape[:2]\n        if (\n            self.motion_prior.in_rot_rep == \"mat\"\n            or self.motion_prior.in_rot_rep == \"6d\"\n        ):\n            root_orient_in = batch_rodrigues(root_orient.reshape(-1, 3)).reshape(\n                (B, T, 9)\n            )\n            body_pose_in = batch_rodrigues(body_pose.reshape(-1, 3)).reshape(\n                (B, T, J_BODY * 9)\n            )\n        if self.motion_prior.in_rot_rep == \"6d\":\n            # the 6d rep keeps the first two columns of the rotation matrix,\n            # so slice the matrix-valued tensors computed above\n            root_orient_in = root_orient_in[:, :, :6]\n            body_pose_in = body_pose_in.reshape((B, T, J_BODY, 9))[:, :, :, :6].reshape(\n                (B, T, J_BODY * 6)\n            )\n        return root_orient_in, body_pose_in\n\n    def convert_prior_rot_outputs(self, out_dict):\n        keys = [\"root_orient\", \"pose_body\"]\n        for k in keys:\n            out = out_dict[k]\n            B, T = out.shape[:2]\n            out_dict[k] = rotation_matrix_to_angle_axis(\n                out.reshape((-1, 3, 3))\n            ).reshape((B, T, -1))\n        return out_dict\n\n    def infer_latent_motion(\n        self, trans, root_orient, body_pose, betas, data_fps, full_forward_pass=False\n    ):\n        \"\"\"\n        By default, gets a sequence of z's from the current SMPL optim params.\n\n        If full_forward_pass is true, also samples from the posterior and feeds\n        through the motion prior decoder to get all terms needed to calculate the ELBO.\n        \"\"\"\n        B, T, _ = trans.size()\n        h = 1.0 / data_fps\n\n        # need to first transform into canonical coordinate frame\n        self.update_world2prior(trans, root_orient, body_pose, betas)\n        trans, root_orient = self.apply_world2prior(\n            trans, root_orient, body_pose, betas\n        )\n\n        smpl_results = self.pred_smpl(trans, root_orient, body_pose, betas)\n        joints = smpl_results[\"joints3d\"]  # (B, T, len(SMPL_JOINTS), 3)\n        trans_vel, joints_vel, root_orient_vel = estimate_velocities(\n            trans, root_orient, joints, data_fps\n        )\n\n        root_orient_in, body_pose_in = self.convert_prior_rot_inputs(\n            root_orient, body_pose\n        )\n        joints_in = joints.reshape((B, T, -1))\n        joints_vel_in = joints_vel.reshape((B, T, -1))\n\n        seq_dict = {\n            \"trans\": trans,\n            \"trans_vel\": trans_vel,\n            \"root_orient\": root_orient_in,\n            \"root_orient_vel\": root_orient_vel,\n            \"pose_body\": body_pose_in,\n            \"joints\": joints_in,\n            \"joints_vel\": joints_vel_in,\n        }\n\n        infer_results = self.motion_prior.infer_global_seq(\n            seq_dict, full_forward_pass=full_forward_pass\n        )\n        if full_forward_pass:\n            # return both the given motion and the one from the forward pass\n            # make sure rotations are matrix\n            # NOTE: assumes seq_dict is same thing we want to compute loss on\n            # Need to change if multiple future steps.\n            if self.motion_prior.in_rot_rep != \"mat\":\n                seq_dict[\"root_orient\"] = 
batch_rodrigues(root_orient.reshape(-1, 3)).reshape(\n (B, T, 9)\n )\n seq_dict[\"pose_body\"] = batch_rodrigues(\n body_pose.reshape(-1, 3)\n ).reshape((B, T, J_BODY * 9))\n # do not need initial step anymore since output will be T-1\n for k, v in seq_dict.items():\n seq_dict[k] = v[:, 1:]\n for k in infer_results.keys():\n if k != \"posterior_distrib\" and k != \"prior_distrib\":\n infer_results[k] = infer_results[k][\n :, :, 0\n ] # only want first output step\n infer_results = (seq_dict, infer_results)\n else:\n prior_z, posterior_z = infer_results\n infer_results = posterior_z[0] # mean of the approximate posterior\n\n return infer_results\n\n def rollout_smpl_steps(self, num_steps=-1):\n # rollout motion given initial state\n num_steps = self.seq_len if num_steps < 0 else num_steps\n latent_motion = self.params.latent_motion[:, : num_steps - 1]\n\n # roll out tracks from their first appearances\n # NOTE rolled out tracks are unsynced in time\n res, world_res = self.rollout_latent_motion(\n latent_motion, return_prior=self.cond_prior\n )\n\n # get the smpl predictions for the unsynced tracks\n preds = self.pred_smpl(\n res[\"trans\"], res[\"root_orient\"], res[\"pose_body\"], res[\"betas\"]\n )\n world_preds = self.pred_smpl(\n world_res[\"trans\"],\n world_res[\"root_orient\"],\n world_res[\"pose_body\"],\n world_res[\"betas\"],\n )\n\n # pass along relevant results\n preds[\"joints3d_rollout\"] = res[\"joints\"]\n preds[\"latent_pose\"] = self.pose2latent(res[\"pose_body\"])\n preds[\"joints3d_init\"] = preds[\"joints3d\"][:, :1]\n\n if \"contacts\" in res:\n preds[\"contacts\"] = res[\"contacts\"]\n if \"contacts_conf\" in res:\n preds[\"contacts_conf\"] = res[\"contacts_conf\"]\n\n # synchronize the tracklets to the same timesteps\n preds = self.synchronize_preds(preds, num_steps)\n world_preds = self.synchronize_preds(world_preds, num_steps)\n\n # pass along unsynchronized motion latents\n preds[\"latent_motion\"] = latent_motion\n if \"cond_prior\" in res:\n preds[\"cond_prior\"] = res[\"cond_prior\"]\n\n return preds, world_preds\n\n def rollout_latent_motion(\n self,\n latent_motion,\n return_prior=False,\n return_vel=False,\n num_steps=-1,\n canonicalize_input=False,\n ):\n \"\"\"\n From the stored initial state, rolls out a sequence from the latent_motion vector\n NOTE: the stored initial states are not at the same time step, but we return all\n predicted sequences starting from time 0.\n\n If latent_motion is None, samples num_steps into the future sequence from the prior.\n using the mean of the prior rather than random samples.\n\n If canonicalize_input is True, transform the initial state into the local canonical\n frame before roll out\n \"\"\"\n # get the first frame state\n # NOTE: first states do not occur at same time step\n trans, root_orient, betas = (\n self.params.trans,\n self.params.root_orient,\n self.params.betas,\n )\n trans_vel, root_orient_vel, joints_vel = (\n self.params.trans_vel,\n self.params.root_orient_vel,\n self.params.joints_vel,\n )\n body_pose = self.latent2pose(self.params.latent_pose)\n\n B = trans.size(0)\n is_sampling = latent_motion is None\n Tm1 = num_steps if latent_motion is None else latent_motion.size(1)\n if is_sampling and Tm1 <= 0:\n Logger.log(\"num_steps must be positive to sample!\")\n exit()\n\n # need to first transform initial state into canonical coordinate frame\n self.update_world2prior(trans, root_orient, body_pose, betas)\n trans, root_orient = self.apply_world2prior(\n trans, root_orient, body_pose, betas\n )\n\n 
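# the initial state is now in the prior's canonical frame (gravity-\n        # aligned, root at a fixed height); the rollout below happens entirely\n        # in this frame and is mapped back to the camera/world frame at the\n        # end via apply_world2prior(..., inverse=True)\n        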
smpl_results = self.pred_smpl(trans, root_orient, body_pose, betas)\n joints = smpl_results[\"joints3d\"] # (B, T, len(SMPL_JOINTS), 3)\n\n # update to correct rotations for input\n root_orient_in, body_pose_in = self.convert_prior_rot_inputs(\n root_orient, body_pose\n )\n joints_in = joints.reshape((B, 1, -1))\n joints_vel_in = joints_vel.reshape((B, 1, -1))\n\n rollout_in_dict = {\n \"trans\": trans,\n \"root_orient\": root_orient_in,\n \"pose_body\": body_pose_in,\n \"joints\": joints_in,\n \"trans_vel\": trans_vel,\n \"root_orient_vel\": root_orient_vel,\n \"joints_vel\": joints_vel_in,\n }\n\n roll_output = self.motion_prior.roll_out(\n None,\n rollout_in_dict,\n Tm1,\n z_seq=latent_motion,\n return_prior=return_prior,\n return_z=is_sampling,\n canonicalize_input=canonicalize_input,\n gender=[self.fit_gender] * B,\n betas=betas.reshape((B, 1, -1)),\n )\n\n pred_dict = prior_out = None\n if return_prior:\n pred_dict, prior_out = roll_output\n else:\n pred_dict = roll_output\n\n pred_dict = self.convert_prior_rot_outputs(pred_dict)\n\n # concat with initial state\n trans_out = torch.cat([trans, pred_dict[\"trans\"]], dim=1)\n root_orient_out = torch.cat([root_orient, pred_dict[\"root_orient\"]], dim=1)\n body_pose_out = torch.cat([body_pose, pred_dict[\"pose_body\"]], dim=1)\n joints_out = torch.cat(\n [joints, pred_dict[\"joints\"].reshape((B, Tm1, -1, 3))], dim=1\n )\n out_dict = {\n \"trans\": trans_out,\n \"root_orient\": root_orient_out,\n \"pose_body\": body_pose_out,\n \"joints\": joints_out,\n \"betas\": betas,\n }\n if return_vel:\n out_dict[\"trans_vel\"] = torch.cat(\n [trans_vel, pred_dict[\"trans_vel\"]], dim=1\n )\n out_dict[\"root_orient_vel\"] = torch.cat(\n [root_orient_vel, pred_dict[\"root_orient_vel\"]], dim=1\n )\n out_dict[\"joints_vel\"] = torch.cat(\n [joints_vel, pred_dict[\"joints_vel\"].reshape((B, Tm1, -1, 3))],\n dim=1,\n )\n\n if return_prior: # return the mean and var of distribution stacked\n pm, pv = prior_out\n out_dict[\"cond_prior\"] = torch.stack([pm, pv], dim=-1)\n\n if self.motion_prior.model_data_config == \"smpl+joints+contacts\":\n pred_contacts = pred_dict[\"contacts\"]\n # get binary classification\n contact_conf = torch.sigmoid(pred_contacts)\n pred_contacts = (contact_conf > CONTACT_THRESH).to(torch.float)\n # expand to full body\n full_contact_conf = torch.zeros((B, Tm1, len(SMPL_JOINTS))).to(contact_conf)\n full_contact_conf[:, :, CONTACT_INDS] = (\n full_contact_conf[:, :, CONTACT_INDS] + contact_conf\n )\n full_contacts = torch.zeros((B, Tm1, len(SMPL_JOINTS))).to(pred_contacts)\n full_contacts[:, :, CONTACT_INDS] = (\n full_contacts[:, :, CONTACT_INDS] + pred_contacts\n )\n # repeat first entry for t0\n full_contact_conf = torch.cat(\n [full_contact_conf[:, 0:1], full_contact_conf], dim=1\n )\n full_contacts = torch.cat([full_contacts[:, 0:1], full_contacts], dim=1)\n out_dict[\"contacts_conf\"] = full_contact_conf\n out_dict[\"contacts\"] = full_contacts\n\n if is_sampling:\n out_dict[\"z\"] = pred_dict[\"z\"]\n\n cam_dict = {\n \"trans\": out_dict[\"trans\"],\n \"root_orient\": out_dict[\"root_orient\"],\n \"pose_body\": out_dict[\"pose_body\"],\n \"betas\": out_dict[\"betas\"],\n }\n\n # also must return trans and root orient in camera frame\n cam_dict[\"trans\"], cam_dict[\"root_orient\"] = self.apply_world2prior(\n out_dict[\"trans\"],\n out_dict[\"root_orient\"],\n out_dict[\"pose_body\"],\n out_dict[\"betas\"],\n inverse=True,\n )\n return out_dict, cam_dict\n\n def synchronize_preds(self, pred_dict, seg_len):\n \"\"\"\n 
synchronize predictions in time by scattering them into [0, seq_len)\n        \"\"\"\n        if not self.async_tracks:\n            # predictions are already synchronized\n            return pred_dict\n\n        # return time-synchronized predictions\n        start = self.track_start\n        end = torch.clip(start + seg_len, max=self.track_end)\n        T = end.max()\n        pred_dict = scatter_dict_segments(pred_dict, start, end, T)\n        pred_mask = get_scatter_mask(start, end, T)\n        pred_dict[\"track_mask\"] = pred_mask\n        return pred_dict\n\n\ndef scatter_dict_segments(data_dict, start, end, T=None, names=None):\n    \"\"\"\n    the tracks as they are stored each begin at their own first frame;\n    uses the start and end indices to scatter them onto the shared timeline\n    \"\"\"\n    sync_dict = data_dict.copy()\n    min_len = (end - start).min()\n    if names is None:\n        names = data_dict.keys()\n    for name in names:\n        val = data_dict[name]\n        if not isinstance(val, torch.Tensor) or val.ndim < 3 or val.shape[1] < min_len:\n            continue\n        sync_dict[name] = scatter_intervals(val, start, end, T)\n    return sync_dict\n\n\ndef select_dict_segments(data_dict, start, end, names=None):\n    out_data_dict = data_dict.copy()\n    min_len = (end - start).min()\n    if names is None:\n        names = data_dict.keys()\n    for name in names:\n        val = data_dict[name]\n        if not isinstance(val, torch.Tensor) or val.ndim < 3 or val.shape[1] < min_len:\n            continue\n        # only select for time-series observations\n        out_data_dict[name] = select_intervals(val, start, end)\n    return out_data_dict\n\n\ndef estimate_velocities(trans, root_orient, joints3d, data_fps):\n    \"\"\"\n    Estimates velocity inputs to the motion prior.\n    - trans (B, T, 3) root translation\n    - root_orient (B, T, 3) aa root orientation\n    - joints3d (B, T, len(SMPL_JOINTS), 3) joints3d of SMPL prediction\n    \"\"\"\n    B, T, _ = trans.size()\n    h = 1.0 / data_fps\n    trans_vel = estimate_linear_velocity(trans, h)\n    joints_vel = estimate_linear_velocity(joints3d, h)\n    root_orient_mat = batch_rodrigues(root_orient.reshape((-1, 3))).reshape(\n        (B, T, 3, 3)\n    )\n    root_orient_vel = estimate_angular_velocity(root_orient_mat, h)\n    return trans_vel, joints_vel, root_orient_vel\n\n\ndef estimate_linear_velocity(data_seq, h):\n    \"\"\"\n    Given some batched data sequences of T timesteps in the shape (B, T, ...), estimates\n    the velocity for the middle T-2 steps using a second order central difference scheme.\n    The first and last frames are estimated with forward and backward first-order\n    differences, respectively.\n    - h : step size\n    \"\"\"\n    # first step is forward diff (t+1 - t) / h\n    init_vel = (data_seq[:, 1:2] - data_seq[:, :1]) / h\n    # middle steps are second order (t+1 - t-1) / 2h\n    middle_vel = (data_seq[:, 2:] - data_seq[:, 0:-2]) / (2 * h)\n    # last step is backward diff (t - t-1) / h\n    final_vel = (data_seq[:, -1:] - data_seq[:, -2:-1]) / h\n\n    vel_seq = torch.cat([init_vel, middle_vel, final_vel], dim=1)\n    return vel_seq\n\n\ndef estimate_angular_velocity(rot_seq, h):\n    \"\"\"\n    Given a batch of sequences of T rotation matrices, estimates angular velocity at T-2 steps.\n    Input sequence should be of shape (B, T, ..., 3, 3)\n    \"\"\"\n    # see https://en.wikipedia.org/wiki/Angular_velocity#Calculation_from_the_orientation_matrix\n    dRdt = estimate_linear_velocity(rot_seq, h)\n    R = rot_seq\n    RT = R.transpose(-1, -2)\n    # compute skew-symmetric angular velocity tensor\n    w_mat = torch.matmul(dRdt, RT)\n    # pull out angular velocity vector\n    # average symmetric entries\n    w_x = (-w_mat[..., 1, 2] + w_mat[..., 2, 1]) / 2.0\n    w_y = (w_mat[..., 0, 2] - w_mat[..., 2, 0]) / 2.0\n    w_z = (-w_mat[..., 0, 1] + 
w_mat[..., 1, 0]) / 2.0\n w = torch.stack([w_x, w_y, w_z], axis=-1)\n return w\n","repo_name":"vye16/slahmr","sub_path":"slahmr/optim/moving_scene.py","file_name":"moving_scene.py","file_ext":"py","file_size_in_byte":27426,"program_lang":"python","lang":"en","doc_type":"code","stars":368,"dataset":"github-code","pt":"81"} +{"seq_id":"12876612627","text":"import os\n\nfrom PyQt5.QtWidgets import *\n\nimport numpy as np\n\nfrom scipy import misc\n\nimport czifile\n\nfrom tifffile import TiffFile\n\nfrom lxml import etree as XMLET\n\nfrom PyQt5.QtCore import QAbstractListModel, QModelIndex, QVariant, QAbstractTableModel\nfrom PyQt5.QtGui import QBrush, QPen\nfrom PyQt5.QtCore import Qt\n\n\nclass CustomRowWidget(QWidget):\n def __init__(self, *args, **kwargs):\n super(CustomRowWidget, self).__init__(*args[1:], **kwargs)\n self.setAcceptDrops(True)\n self.file_path = str(args[0])\n self.isParsingNeeded = True\n self.z_file_path = None\n self.extensions = [\"czi\", \"tiff\", \"tif\", \"lsm\", \"png\"]\n self.row = QHBoxLayout()\n self.row.addWidget(QLabel(args[0].split(os.sep)[-1]))\n self.pushButtonOpenZ = QPushButton()\n self.pushButtonOpenZ.setText(\"Open z-stack\")\n self.pushButtonOpenZ.clicked.connect(self.open_z)\n self.row.addWidget(self.pushButtonOpenZ)\n self.setLayout(self.row)\n\n def open_z(self):\n file_dialog = QFileDialog()\n title = \"Open z-stack\"\n # extensions = \"Confocal images (*.jpg; *.png; *.tif;);;Confocal stacks (*.ics)\"\n # extensions = \"Confocal images (*.jpg *.png *.tif *.ics)\"\n extensions = \"image (*.czi *.tiff *.tif *.lsm *.png\" \\\n \")\"\n files_list = QFileDialog.getOpenFileNames(file_dialog, title,\n os.getcwd(), extensions)[0]\n self.z_file_path = files_list[0]\n self.row.removeWidget(self.pushButtonOpenZ)\n self.row.addWidget(QLabel(self.z_file_path))\n\n def dragEnterEvent(self, e):\n if e.mimeData().hasUrls:\n if all([str(url.toLocalFile()).split(\".\")[-1] in self.extensions for url in e.mimeData().urls()]):\n e.accept()\n else:\n e.ignore()\n\n def dragMoveEvent(self, e):\n if e.mimeData().hasUrls:\n if all([str(url.toLocalFile()).split(\".\")[-1] in self.extensions for url in e.mimeData().urls()]):\n e.accept()\n else:\n e.ignore()\n\n def dropEvent(self, e):\n \"\"\"\n Drop files directly onto the widget\n File locations are stored in fname\n :param e:\n :return:\n \"\"\"\n if e.mimeData().hasUrls:\n e.setDropAction(Qt.CopyAction)\n e.accept()\n # Workaround for OSx dragging and dropping\n for url in e.mimeData().urls():\n self.z_file_path = str(url.toLocalFile())\n self.row.removeWidget(self.pushButtonOpenZ)\n self.row.addWidget(QLabel(self.z_file_path))\n else:\n e.ignore()\n\n\n\n#Rewritten class to read different formats of microscope images and bring them in the same shape.\n#Supportet formats are .czi; .lsm ;.tiff\n#Tiff images will most likely not contain the needed metadata -> Pixel size must be set manually.\n#Batch mechanism off Zeiss SIM will result in broken header file and is not supported yet.\n#Output arrays will be reshaped to [Color[ZStack[X[Y]]]].\n#See MicroscopeImage for input.\n\nclass ImageSIM(CustomRowWidget):\n \"\"\"ImageSIM is an instance of QListWidget class, used to read and order SIM data from common file formats.\n Supported formats are: .czi, .lsm, .tif\n \"\"\"\n #Initialisation see class MicroscopeImage.\n def __init__(self, *args, **kwargs):\n super(ImageSIM, self).__init__(*args, **kwargs)\n self.reset_data()\n\n #Reset ConfocalImage attributes.\n def reset_data(self):\n self.data = []\n self.relevantData = 
[]\n        self.metaData = {}\n        self._index = np.zeros(4).astype(np.uint8)\n        self.isParsingNeeded = True\n        self.extend = None\n        self._flip = {}\n        self._channel = np.zeros(4).astype(bool)\n        self.data_z = None\n\n    #Read the image data and metadata and load them into a numpy array.\n    #Rearrange the arrays into a consistent shape.\n    def parse(self, calibration_px=0.0322, ApplyButton=False):\n        self.isParsingNeeded = False\n        self.metaData = {}\n        self.data_z = None\n        self.data = []\n        self.Shape = np.ones(1,dtype={'names':[\"SizeX\",\"SizeY\",\"SizeZ\",\"SizeC\"],'formats':['i4','i4','i4','i4']})\n        self.extend = os.path.splitext(self.file_path)[1]\n        self._color = np.array(([[1,0,0,1],[0,1,0,1],[0,0,1,1],[1,1,0,1]]))\n        #CZI files\n        if self.extend == '.czi':\n            with czifile.CziFile(self.file_path) as czi:\n                self.data = czi.asarray()\n                #Get relevant part of file header => Metadata.\n                Header_Metadata = czi.metadata#str(czi.decode(\"utf-8\")).split('')\n                Metadata = XMLET.fromstring(Header_Metadata)\n                try:\n                    #Query XML for the metadata for picture shape(X;Y;Z-stacks).\n                    #Picture Shape.\n                    shapes = Metadata.findall('./Metadata/Information/Image')[0]\n                    self.metaData[\"ShapeSizeX\"] = int(shapes.findall('SizeX')[0].text)\n                    self.metaData[\"ShapeSizeY\"] = int(shapes.findall('SizeY')[0].text)\n                    try:\n                        self.metaData[\"ShapeSizeZ\"] = int(shapes.findall('SizeZ')[0].text)\n                    except:\n                        self.metaData[\"ShapeSizeZ\"] = 1\n                        print(\"One z-Slice\")\n                    #Get the hyperstack dimension if the image is a hyperstack.\n                    try:\n                        self.metaData[\"ShapeSizeC\"] = int(shapes.findall('SizeC')[0].text)\n                    except:\n                        self.metaData[\"ShapeSizeC\"] = 1\n                        print(\"One Channel\")\n                    #Get physical pixel size of image(nm/px) convert to(µm/px).\n                    PixelSizes = Metadata.findall('./Metadata/Scaling/Items/Distance')\n                    self.metaData['SizeX'] = float(PixelSizes[0].findall('Value')[0].text)*10**6\n                    self.metaData['SizeY'] = float(PixelSizes[1].findall('Value')[0].text)*10**6\n                    self.metaData['SizeZ'] = float(PixelSizes[2].findall('Value')[0].text)*10**6\n                except:\n                    print(\"Metadata fail\")\n                    raise\n\n        #Tiff files.\n        #Tiff files are problematic because they most likely won't contain the necessary metadata.\n        #Try to get the shape info over common dimensions.\n        elif self.extend == '.tif' or self.extend == '.tiff':\n            #z_name = os.path.splitext(self.file_path)[0]+\"-z-stack.tif\"\n            if self.z_file_path is not None:\n                if os.path.exists(self.z_file_path):\n                    with TiffFile(self.z_file_path) as tif:\n                        self.data_z = tif.asarray()\n            #z_name = os.path.splitext(self.file_path)[0]+\"-z-stack.tiff\"\n            #if os.path.exists(z_name):\n            #    with tifffile.TiffFile(z_name) as tif:\n            #        self.data_z = tif.asarray()\n            with TiffFile(self.file_path) as tif:\n                #print(tif.imagej_metadata)\n                self.data = tif.asarray()#[...,0]#np.moveaxis(tif.asarray(),0,1)\n\n                #self.data = np.rollaxis(self.data,0,1)\n                #self.data = np.rollaxis(self.data,2,0)\n                self.metaData[\"ShapeSizeC\"] = 3\n                self.metaData[\"ShapeSizeZ\"] = 1\n                self.metaData[\"SizeZ\"] = 1\n                self.metaData[\"SizeX\"] = calibration_px\n                self.metaData[\"SizeY\"] = calibration_px\n                self.metaData[\"ShapeSizeY\"] = self.data.shape[-2]\n                self.metaData[\"ShapeSizeX\"] = self.data.shape[-1]\n                for page in tif.pages:\n                    for tag in page.tags.values():\n                        tag_name, tag_value = tag.name, tag.value\n                        #print(tag_name, tag_value)\n                        if \"ImageDescription\" in tag_name:\n                            tags = tag_value.split(\"\\n\")\n                            axes = []\n                            lengths = []\n                            for tag in tags:\n                                if \"axes\" in tag:\n                                    axes = tag.split(\"=\")[-1].split(\",\")\n                                    print(\"calculating axe dimensions\")\n                            if 
\"lengths\" in tag:\n lengths = tag.split(\"=\")[-1].split(\",\")\n if \"slices\" in tag:\n print(\"Found Z Stack\")\n axes.append(\"Slices\")\n lengths.append(tag.split(\"=\")[-1])\n if \"channels\" in tag:\n print(\"Found Color Channels\")\n axes.append(\"Channels\")\n lengths.append(tag.split(\"=\")[-1])\n # for i,axe in enumerate(axes):\n # if \"X\" in axe:\n # self.metaData[\"ShapeSizeX\"] = int(lengths[i])\n # if \"Y\" in axe:\n # self.metaData[\"ShapeSizeX\"] = int(lengths[i])\n # if \"Channel\" in axe:\n # self.metaData[\"ShapeSizeC\"] = int(lengths[i])\n # if \"Slices\" in axe:\n # self.metaData[\"ShapeSizeZ\"] = int(lengths[i])\n\n\n #Read Lsm Files.\n elif self.extend == '.lsm':\n with TiffFile(self.file_path) as tif:\n self.data = tif.asarray(memmap=True)\n headerMetadata = str(tif.pages[0].cz_lsm_scan_info)\n metadataList = headerMetadata.split(\"\\n*\")\n #Get image shape from lsm header SizeC=0 if not given.\n for shapes in metadataList:\n if \"images_height\" in shapes:\n self.metaData[\"ShapeSizeX\"]= int(shapes.split()[-1])\n if \"images_width\" in shapes:\n self.metaData[\"ShapeSizeY\"]= int(shapes.split()[-1])\n if \"images_number_planes\" in shapes:\n self.metaData[\"ShapeSizeZ\"]= int(shapes.split()[-1])\n if \"images_number_channels\" in shapes:\n self.metaData[\"ShapeSizeC\"]= int(shapes.split()[-1])\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n self.data = np.swapaxes(self.data,1,2)\n LsmPixelHeader = str(tif.pages[0].tags.cz_lsm_info)\n LsmInfo = LsmPixelHeader.split(\", \")\n i = 0\n #Query for pixel size.\n for element in LsmInfo:\n\n if \"e-0\" in element:\n i += 1\n if i == 1:\n self.metaData['SizeX'] = (float(element)*10**6)\n if i == 2:\n self.metaData['SizeY'] = (float(element)*10**6)\n if i == 3:\n self.metaData['SizeZ'] = (float(element)*10**6)\n\n elif self.extend == \".png\":\n self.data = misc.imread(self.file_path)\n self.data = np.expand_dims(np.expand_dims(self.data[...,0],0),0)\n self.metaData[\"ShapeSizeC\"] = 1\n self.metaData[\"ShapeSizeZ\"] = 1\n self.metaData[\"ShapeSizeX\"] = self.data.shape[2]\n self.metaData[\"ShapeSizeY\"] = self.data.shape[3]\n self.metaData[\"SizeZ\"] = 1\n self.metaData[\"SizeX\"] = 0.01\n self.metaData[\"SizeY\"] = 0.01\n #todo:hack 2d into 4d\n if len(self.data.shape) ==2:\n new_data = np.zeros((3,1,self.data.shape[0], self.data.shape[1]))\n self.metaData['SizeX'] = calibration_px\n self.metaData['SizeY'] = calibration_px\n\n new_data[0,0] = self.data\n self.data = new_data\n if len(self.data.shape) == 3:\n if self.data.shape[2] <5:\n new_data = np.zeros((3,1,self.data.shape[0],self.data.shape[1]))\n new_data[0,:] = np.sum(self.data,axis=2)\n self.metaData[\"ShapeSizeY\"] = self.data.shape[0]\n self.metaData[\"ShapeSizeX\"] = self.data.shape[1]\n else:\n new_data = np.zeros((3,1,self.data.shape[1], self.data.shape[2]))\n self.metaData['SizeX'] = calibration_px\n self.metaData['SizeY'] = calibration_px\n\n for i in range(self.data.shape[0]):\n new_data[i,0] = self.data[i]\n self.data = new_data\n # #Bring all formats in the same shape.\n for i,n in enumerate(self.data.shape):\n if n == self.metaData[\"ShapeSizeC\"]:\n self.data = np.rollaxis(self.data, i, 0)\n if n == self.metaData[\"ShapeSizeZ\"]:\n self.data = np.rollaxis(self.data, i, 1)\n if n == self.metaData[\"ShapeSizeY\"]:\n self.data = np.rollaxis(self.data, i, 2)\n if n == self.metaData[\"ShapeSizeX\"]:\n self.data = np.rollaxis(self.data, i, 3)\n self.data = 
np.reshape(self.data,(self.metaData[\"ShapeSizeC\"],self.metaData[\"ShapeSizeZ\"],self.metaData[\"ShapeSizeY\"],self.metaData[\"ShapeSizeX\"]))\n        self.metaData['ChannelNum'] = self.metaData[\"ShapeSizeC\"]\n        #Set pixel size to the manual value if there are no metadata.\n        if self.metaData == {}:\n            self.set_calibration(calibration_px)\n        #Set the box for manual calibration to the actual pixel size.\n\n\n    @property\n    def data_rgba_2d(self):\n        if not np.any(self._channel):\n            raise ValueError(\"No channel visible\")\n        visible_data = self.data[(np.where(self._channel))]\n        data_rgba = np.zeros((visible_data.shape[0], self.metaData[\"ShapeSizeY\"],self.metaData[\"ShapeSizeX\"],4))\n        indices = self._index[np.where(self._channel)]\n        for i in range(visible_data.shape[0]):\n            data_rgba[i] = np.stack((visible_data[i,indices[i]],)*4,axis=-1)\n            data_rgba[i] *= self._color[np.where(self._channel)][i]\n        return data_rgba\n\n    @property\n    def data_gray_2d(self):\n        if not np.any(self._channel):\n            raise ValueError(\"No channel visible\")\n        visible_data = self.data[(np.where(self._channel))]\n        data_gray = np.zeros((visible_data.shape[0], self.metaData[\"ShapeSizeY\"],self.metaData[\"ShapeSizeX\"]))\n        indices = self._index[np.where(self._channel)]\n        for i in range(visible_data.shape[0]):\n            data_gray[i] = visible_data[i,indices[i]]\n        return data_gray\n\n\n    @property\n    def data_rgba_3d(self):\n        return self.data[np.where(self._channel)]\n\n    @property\n    def channel(self, index):\n        return self._channel[index]\n\n    @channel.setter\n    def channel(self, value):\n        self._channel[value[0]] = value[1]\n\n    @property\n    def index(self, channel):\n        return self._index[channel]\n\n    @index.setter\n    def index(self, value):\n        if value[1] > self.metaData[\"ShapeSizeZ\"]:\n            raise ValueError(\"Index out of bounds\")\n        self._index[value[0]] = value[1]\n\n    @property\n    def color(self, channel):\n        return self._color[channel]\n\n    @color.setter\n    def color(self, channel, value):\n        if value.shape[0] != 4:\n            raise ValueError(\"Not a color\")\n        self._color[channel] = value\n\n    @property\n    def flip(self, direction):\n        \"\"\"\n        directions: UpsideDown, LeftRight\n        value: True, False\n        \"\"\"\n        return self._flip[direction]\n\n    @flip.setter\n    def flip(self, direction, value):\n        self._flip[direction] = value\n\n    #Set pixel size to a manual value.\n    def set_calibration(self, px):\n        self.metaData['SizeX'] = px\n        self.metaData['SizeY'] = px\n        self.metaData['SizeZ'] = px","repo_name":"super-resolution/lineprofiler","sub_path":"src/controllers/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":15935,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"13462559015","text":"import scrapy\nfrom imoti_scrapper.items import EstateItem\n\nclass EstateSpider(scrapy.Spider):\n    name = \"estate\"\n\n    def start_requests(self):\n        urls = [\n            'https://www.imot.bg/pcgi/imot.cgi?act=3&slink=30zkmc&f1=1'\n        ]\n        for url in urls:\n            yield scrapy.Request(url=url, callback=self.parse)\n\n    def parse(self, response):\n        for estate in response.css('table[width=\"660\"]'):\n            if estate.css('div.price::text').extract_first():\n                item = EstateItem()\n                item['url'] = estate.css('a.photoLink::attr(href)').extract_first()\n                item['price'] = estate.css('div.price::text').extract_first()\n                item['estateType'] = estate.css('a.lnk1::text').extract_first()\n                item['description'] = estate.css('tr:nth-child(3) td::text').extract_first()\n                item['location'] = estate.css('a.lnk2::text').extract_first()\n                yield item\n\n        
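# NOTE: the pagination below assumes the page number is the final character\n        # of the URL, so it only works for single-digit pages (f1=1..f1=9).\n        # A more robust variant (a hypothetical sketch, not part of the\n        # original spider) could parse the query string instead:\n        #   from urllib.parse import urlparse, parse_qs\n        #   qs = parse_qs(urlparse(response.request.url).query)\n        #   next_page_number = int(qs['f1'][0]) + 1\n        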
current_page_number = int(response.request.url[-1])\n next_page_url = (response.request.url[:-1] + str(current_page_number + 1))[6:]\n next_page = response.css('a[href=\"' + next_page_url + '\"]::attr(href)').extract_first()\n\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse)\n","repo_name":"veselinn/estates-spider","sub_path":"spiders/estate_spider.py","file_name":"estate_spider.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37608808787","text":"# File: DNA.py\n\n# Description: An algorithm designed to find longest common substring given different pairs of DNA sequences\n\n# Student Name: Stephen Rauner\n\n# Student UT EID: STR428\n\n# Course Name: CS 303E\n\n# Unique Number: 50475\n\n# Date Created: 10/14/2015\n\n# Date Last Modified: 10/14/2015\n\n\n# function to test if specified sub (of dna2) is present in strand (dna1)\ndef is_in (sub, strand):\n\n\t# loop searching through every sub of size len(sub) within strand\n\tfor i in range(len(strand) - len(sub) + 1):\n\n\t\t# conditional testing if our sub ever occurs in strand\n\t\tif (sub == strand[i:(i + len(sub))]):\n\t\t\treturn True\n\n\treturn False\n\t\t\n\ndef main():\n # open file for reading\n in_file = open (\"./dna.txt\", \"r\")\n\n # read number of pairs\n num_pairs = in_file.readline()\n num_pairs = num_pairs.strip()\n num_pairs = int (num_pairs)\n\n # create a 2D list which will contain lists for each respective pair\n commons = []\n\n # read each pair of dna strands\n for i in range (num_pairs):\n st1 = in_file.readline()\n st2 = in_file.readline()\n\n # remove white space from either end\n st1 = st1.strip()\n st2 = st2.strip()\n\n # make both strands upper case\n st1 = st1.upper()\n st2 = st2.upper()\n\n # order strands by size (dna1 is always larger or equal)\n if (len(st1) > len(st2)):\n dna1 = st1\n dna2 = st2\n else:\n dna1 = st2\n dna2 = st1\n\n # get all substrings of dna2\n wnd = len (dna2)\n\n # create list of biggest common elements; start with empty\n pair_common = []\n\n \t# makes sure to run through every window size greater than one\n while (wnd > 1):\n start_idx = 0\n\n while ((start_idx + wnd) <= len (dna2)):\n sub_strand = dna2[start_idx: (start_idx + wnd)]\n\n # appends sub_strand to list of common pairs if it exists in dna1\n if (is_in (sub_strand, dna1) \n\n # checks if the sub_strand is already accounted for;\n # if it is, it won't add it to the list again\n and (pair_common.count(sub_strand) == 0)):\n pair_common.append(sub_strand)\n\n # move starting place by 1\n start_idx += 1\n\n # once it finds a common element, it finishes\n # looking in that window and then stops looking\n if (len(pair_common) > 0):\n commons.append(pair_common)\n break\n\n # append commons list with an empty set if there are no common substrings\n if ((len(pair_common) == 0) and (wnd == 2)):\n commons.append([])\n\n # decrease window size\n wnd = wnd - 1\n\n # loop to format final response\n print(\"\\nLongest Common Sequences\\n\")\n\n # first - load ith row of matrix commons\n for i in range (len(commons)):\n\n # immediately - check if there exist any common substrings\n if (len(commons[i]) == 0):\n print (\"Pair {}: No Common Sequence Found\\n\".format(i + 1))\n continue\n\n # now - run through each element of the ith row\n for j in range (len(commons[i])):\n\n # only for the first element (j == 0) of the\n # set do we want to include \"pair x\" 
prior\n if (j == 0): \n print (\"Pair {}: {}\".format(i + 1, commons[i][j]))\n\n # for all but the first, make 8 spaces then the new element\n else:\n print (\"{:>8}{}\".format(\"\", commons[i][j]))\n print()\n\n\n # close file\n in_file.close()\n\nmain()","repo_name":"stOracle/Migrate","sub_path":"Programming/CS303E/DNA.py","file_name":"DNA.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34452378846","text":"#Question 1\ndef max_of_three_numbers():\n a = int(input(\"Enter first number: \"))\n b = int(input(\"Enter second number: \"))\n c = int(input(\"Enter last number: \"))\n maximum = a\n if b > maximum:\n maximum = b\n if c > maximum:\n maximum = c\n return maximum\n\n\n#Question 2\ndef sum_of_numbers_in_a_list():\n numbers = []\n total = 0\n for i in range(5):\n print(\"Enter value for number \", (i+1))\n digit = int(input())\n numbers.append(digit)\n for number in numbers:\n total += number\n return total\n\n#Question 3\ndef multiply_numbers_in_list():\n numbers = []\n product = 1\n for i in range(5):\n print(\"Enter value for number \", (i+1))\n digit = int(input())\n numbers.append(digit)\n product = 1\n for number in numbers:\n product *= number\n return product\n\n#Question 4\ndef reverse_string():\n texts = input(\"Enter a word: \")\n new_word = ''\n length = len(texts) - 1\n while length >= 0:\n new_word += texts[length]\n length -= 1\n\n return new_word\n\n#Question 5\ndef calculateFactorial():\n a = int(input(\"Enter a number: \"))\n factorial = 1\n for i in range(1,a+1):\n factorial *= i\n return factorial\n\n#Question 6\ndef find_range():\n a = int(input(\"Enter a number: \"))\n response = \"The value is in range of 1-10\"\n second_response = \"number is not in range\"\n if a in range(1,21):\n return response \n else:\n return second_response\n\n#Question 7\ndef count_lower_and_uppercase():\n words = input(\"Enter a sentence: \")\n letters = {'upper_case': 0, 'lower_case': 0}\n for letter in words:\n if letter.islower():\n letters[\"lower_case\"]+=1\n if letter.isupper():\n letters[\"upper_case\"]+=1\n print(\"The number of uppercase is: \", letters[\"upper_case\"])\n print(\"The number of lowercase is: \", letters['lower_case'])\n\n#Question 8\ndef unique_list():\n print(\"Enter 10 numbers at random with no regards for uniqueness\")\n list = []\n for i in range(10):\n number = int(input(\"Enter a number: \"))\n list.append(number)\n new_list = []\n for number in list:\n if number not in new_list:\n new_list.append(number)\n return new_list\n\n#Question 9\ndef prime_number():\n a = int(input(\"Enter a number: \"))\n first_response = \"is a prime number\"\n second_response = \"is not a prime number\"\n factors = 0\n for number in range(2, a+1):\n if a % number == 0:\n factors += number\n\n if factors == a:\n print(a, end=\" \")\n return first_response\n else:\n print(a, end=\" \")\n return second_response\n\n#Question 10\ndef even_number_in_list():\n print(\"A list of 10 numbers\")\n list = []\n for i in range(10):\n digit = int(input(\"Enter a number: \"))\n list.append(digit)\n x = []\n for number in list:\n if number % 2 == 0:\n x.append(number)\n return x\n","repo_name":"everybees/parsel_tongue","sub_path":"tosin/functions/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"30415821506","text":"# Hangman game\n## This is just 
the project for MIT \"Introduction to Computation and Programming with Python Course\"\n# Let's add a new feature.\n\nimport random\n\nWORDLIST_FILENAME = \"words.txt\"\n\ndef loadWords():\n    \"\"\"\n    Returns a list of valid words. Words are strings of lowercase letters.\n    \n    Depending on the size of the word list, this function may\n    take a while to finish.\n    \"\"\"\n    print(\"Loading word list from file...\")\n    # inFile: file\n    inFile = open(WORDLIST_FILENAME, 'r')\n    # line: string\n    line = inFile.readline()\n    # wordlist: list of strings\n    wordlist = line.split()\n    print(\"  \", len(wordlist), \"words loaded.\")\n    return wordlist\n\ndef chooseWord(wordlist):\n    \"\"\"\n    wordlist (list): list of words (strings)\n\n    Returns a word from wordlist at random\n    \"\"\"\n    return random.choice(wordlist)\n\n# end of helper code\n# -----------------------------------\n\n# Load the list of words into the variable wordlist\n# so that it can be accessed from anywhere in the program\nwordlist = loadWords()\n\ndef isWordGuessed(secretWord, lettersGuessed):\n    '''\n    secretWord: string, the word the user is guessing\n    lettersGuessed: list, what letters have been guessed so far\n    returns: boolean, True if all the letters of secretWord are in lettersGuessed;\n      False otherwise\n    '''\n    return all([i in lettersGuessed for i in set(secretWord)])\n    # OR\n    # return set(secretWord) == (set(secretWord) & set(lettersGuessed))\n\n\ndef getGuessedWord(secretWord, lettersGuessed):\n    '''\n    secretWord: string, the word the user is guessing\n    lettersGuessed: list, what letters have been guessed so far\n    returns: string, comprised of letters and underscores that represents\n      what letters in secretWord have been guessed so far.\n    '''\n    for i in secretWord:\n        if i not in lettersGuessed:\n            secretWord = secretWord.replace(i, '_ ')\n    return secretWord\n    # List Comprehension\n    # return ''.join([l if l in lettersGuessed else '_ ' for l in secretWord])\n\n\ndef getAvailableLetters(lettersGuessed):\n    '''\n    lettersGuessed: list, what letters have been guessed so far\n    returns: string, comprised of letters that represents what letters have not\n      yet been guessed.\n    '''\n    return ''.join([letter for letter in 'abcdefghijklmnopqrstuvwxyz' if letter not in lettersGuessed])\n\n\ndef hangman(secretWord):\n    '''\n    secretWord: string, the secret word to guess.\n\n    Starts up an interactive game of Hangman.\n\n    * At the start of the game, let the user know how many \n      letters the secretWord contains.\n\n    * Ask the user to supply one guess (i.e. 
letter) per round.\n\n    * The user should receive feedback immediately after each guess \n      about whether their guess appears in the computer's word.\n\n    * After each round, you should also display to the user the \n      partially guessed word so far, as well as letters that the \n      user has not yet guessed.\n\n    Follows the other limitations detailed in the problem write-up.\n    '''\n    print('Welcome to the game Hangman!')\n    print(f'I am thinking of a word that is {len(secretWord)} letters long.')\n\n    lettersGuessed = []\n    round = 8\n    while round > 0:\n        print(f\"You have {round} guesses left.\")\n        print('Available letters:', getAvailableLetters(lettersGuessed))\n\n        letter = input('Please guess a letter: ').lower()\n        if letter in lettersGuessed:\n            print(\"Oops! You've already guessed that letter: \", getGuessedWord(secretWord, lettersGuessed))\n            print(\"------------------------------------\")\n            continue\n        else:\n            lettersGuessed.append(letter)\n\n        if isWordGuessed(secretWord, lettersGuessed):\n            print(\"--------------------------------------\")\n            print(\"Congratulations, you won!\")\n            break\n        else:\n            if letter in secretWord:\n                print(\"Good guess:\", getGuessedWord(secretWord, lettersGuessed))\n                print(\"------------------------------------\")\n            else:\n                print(\"Oops! That letter is not in my word:\", getGuessedWord(secretWord, lettersGuessed))\n                print(\"------------------------------------\")\n                round -= 1\n                if round == 0:\n                    print(f\"Sorry, you ran out of guesses. The word was {secretWord}.\")\n\n\nsecretWord = chooseWord(wordlist).lower()\nhangman(secretWord)\n","repo_name":"yinminaung/project_hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70388662026","text":"import cPickle as pickle\nimport numpy as np\n\n\ndata_path = '../data/'\ndataset = 'restaurant'\n\nreview_set = pickle.load(open(data_path + dataset + '/' + 'reviews.pkl', 'rb'))\n\nnins = len(review_set)\ntrindex = set(np.random.choice(nins, size=nins / 2, replace=False, p=None))\n\ntrainset = list()\ntestset = list()\nfor i in xrange(nins):\n    if i in trindex:\n        trainset.append(review_set[i])\n    else:\n        testset.append(review_set[i])\n\ntrfile = data_path + dataset + '/splits/train0.pkl'\npickle.dump(trainset, open(trfile, 'wb'))\n\ntsfile = data_path + dataset + '/splits/test0.pkl'\npickle.dump(testset, open(tsfile, 'wb'))\n","repo_name":"lipingliulp/sentiment-analysis","sub_path":"prepare_data/restaurant/separate_folds.py","file_name":"separate_folds.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28628555829","text":"result = []\nt = int(input())\nfor i in range(t):\n    r, s = input().split()\n    r = int(r)\n    for j in range(len(s)):\n        for k in range(r):\n            result.append(s[j])\n    result.append(\"\\n\")\n\nfor i in result:\n    print(i, end='')","repo_name":"mangbaam/CodingTest","sub_path":"baekjoon/미분류/문자열반복.py","file_name":"문자열반복.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9014154519","text":"from monitorTopology.models import Session, Node, Server, Agent, ISP, Network, Edge, NetEdge, PeeringEdge, Latency, Hop, Subnetwork, ServerProbing, NetProbing\r\nfrom monitorTopology.ipinfo import *\r\nfrom django.utils import timezone\r\nfrom 
django.db import transaction\r\nfrom django.db.models import Q\r\nfrom monitorTopology.azure_agents import *\r\nfrom monitorTopology.anomalies_utils import *\r\nfrom monitorTopology.lat_utils import *\r\nimport math\r\nimport sys\r\nimport requests\r\n# import the logging library\r\nimport logging\r\n\r\n# Get an instance of a logger\r\nlogger = logging.getLogger(__package__)\r\n\r\n### @function add_node(node_ip, nodeTyp=\"router\")\r\n# @params:\r\n# node_ip : the ip address of a given node\r\n# nodeTyp : the type of a given node. Can be client, server, router in a video session\r\n# or pl_agent, which is a probing agent in PlanetLab\r\n# or azure_agent, which is a probing agent in Azure\r\n# @return: the node object in Node model\r\n@transaction.atomic\r\ndef add_node(node_ip, nodeTyp=\"router\", nodeName=None, netTyp=\"transit\"):\r\n try:\r\n node = Node.objects.get(ip=node_ip)\r\n node.type = nodeTyp\r\n node.save()\r\n except:\r\n node_info = get_node_info(node_ip)\r\n\r\n try:\r\n node_isp = ISP.objects.get(ASNumber=node_info[\"AS\"])\r\n except:\r\n node_isp = ISP(ASNumber=node_info[\"AS\"], name=node_info[\"ISP\"], type=netTyp)\r\n node_isp.save()\r\n\r\n latitude = float(node_info['latitude'])\r\n latitude_str = '{0:.6f}'.format(latitude)\r\n longitude = float(node_info['longitude'])\r\n longitude_str = '{0:.6f}'.format(longitude)\r\n # print(\"AS \" + str(node_isp.ASNumber) + \"(\" + latitude_str + \",\" + longitude_str + \")\" )\r\n try:\r\n node_network = Network.objects.get(isp=node_isp, latitude=latitude_str, longitude=longitude_str)\r\n except:\r\n node_network = Network(isp=node_isp, latitude=latitude_str, longitude=longitude_str, city=node_info[\"city\"], region=node_info[\"region\"], country=node_info[\"country\"])\r\n node_network.save()\r\n\r\n if nodeName:\r\n node = Node(ip=node_ip, name=nodeName, type=nodeTyp, network=node_network)\r\n else:\r\n node = Node(ip=node_ip, name=node_info['name'], type=nodeTyp, network=node_network)\r\n node.save()\r\n\r\n if node not in node_network.nodes.all():\r\n node_network.nodes.add(node)\r\n node_network.save()\r\n\r\n if node_network not in node_isp.networks.all():\r\n node_isp.networks.add(node_network)\r\n node_isp.save()\r\n\r\n ## Wrap up the server node\r\n if nodeTyp == \"server\":\r\n try:\r\n srv = add_server(node)\r\n except:\r\n print(\"Failed to wrap up the server node as server object: \" + node.ip)\r\n\r\n return node\r\n\r\n### @function init_azure_nodes()\r\n# @descr: Add all Azure nodes\r\n# @params:\r\n# node: the ip of the agent to be added\r\ndef init_azure_nodes():\r\n azure_nodes = list_azure_agents(\"monitoring\", \"agent-\")\r\n for azure_node in azure_nodes:\r\n az_node = add_node(azure_node[\"ip\"], nodeTyp=\"client\", nodeName=azure_node[\"name\"], netTyp=\"cloud\")\r\n\r\n### @function add_server(node)\r\n# @descr: Wrap up a server node as a server object\r\n# @params:\r\n# node: the ip of the agent to be added\r\ndef add_server(node):\r\n try:\r\n srv = Server.objects.get(node=node)\r\n except:\r\n srv = Server(node=node)\r\n srv.save()\r\n return srv\r\n\r\n### @function add_agent(agent_ip, agent_type)\r\n# @descr: Add an agent by its ip\r\n# @params:\r\n# agent_ip: the ip of the agent to be added\r\ndef add_agent(agent_ip):\r\n agent_node = add_node(agent_ip, nodeTyp=\"client\", netTyp=\"access\")\r\n if agent_node.network.isp.name.__contains__(\"Microsoft\"):\r\n agent_typ = \"azure\"\r\n ## Check if the ISP is AWS or Google here.\r\n # elif 
agent_node.network.isp.name.__contains__(\"Microsoft\"):\r\n else:\r\n agent_typ = \"planetlab\"\r\n try:\r\n agent = Agent.objects.get(node=agent_node, agentType=agent_typ)\r\n except:\r\n agent = Agent(node=agent_node, agentType=agent_typ)\r\n agent.save()\r\n return agent\r\n\r\n\r\n### @function add_private_node(pre_node, nodeTyp)\r\n# @descr: Add an \"*\" node in the network\r\n# @params:\r\n# pre_node : previous known node\r\ndef add_private_node(pre_node, nodeTyp=\"router\"):\r\n cur_net = pre_node.network\r\n try:\r\n private_node = Node.objects.get(ip=\"*\", network=pre_node.network)\r\n except:\r\n private_node = Node(ip=\"*\", name=\"*\", type=nodeTyp, network=cur_net)\r\n private_node.save()\r\n if private_node not in cur_net.nodes.all():\r\n cur_net.nodes.add(private_node)\r\n\r\n # update the link between \"*\" node and the closest given node.\r\n update_edge(pre_node, private_node, 500)\r\n\r\n #if private_node not in pre_node.network.nodes.all():\r\n # pre_node.network.nodes.add(private_node)\r\n\r\n return private_node\r\n\r\n### @function update_peering(src_isp, dst_isp)\r\n# @descr: Save the peering relationship in the database\r\n# @params:\r\n# src_isp : the source ISP in the peering link\r\n# dst_isp : the destination ISP in the peering link\r\n@transaction.atomic\r\ndef update_peering(src_isp, dst_isp):\r\n if src_isp.ASNumber == dst_isp.ASNumber:\r\n return\r\n\r\n if src_isp.ASNumber > dst_isp.ASNumber:\r\n tmp_isp = src_isp\r\n src_isp = dst_isp\r\n dst_isp = tmp_isp\r\n\r\n try:\r\n peering_link = PeeringEdge.objects.get(srcISP=src_isp, dstISP=dst_isp)\r\n except:\r\n peering_link = PeeringEdge(srcISP=src_isp, dstISP=dst_isp)\r\n peering_link.save()\r\n\r\n\r\n### @function update_net_edge(srcNet, dstNet, isIntra)\r\n# @descr: Save the edge between two networks in the database\r\n# @params:\r\n# srcNet : the source network of the link\r\n# dstNet : the destination network of the link\r\n# isIntra : denotes if the link is an intra ISP link\r\n@transaction.atomic\r\ndef update_net_edge(srcNet, dstNet, isIntra):\r\n if srcNet.id == dstNet.id:\r\n return\r\n\r\n if srcNet.id > dstNet.id:\r\n tmpNet = srcNet\r\n srcNet = dstNet\r\n dstNet = tmpNet\r\n\r\n try:\r\n net_edge = NetEdge.objects.get(srcNet=srcNet, dstNet=dstNet)\r\n except:\r\n net_edge = NetEdge(srcNet=srcNet, dstNet=dstNet, isIntra=isIntra)\r\n net_edge.save()\r\n\r\n\r\n### @function update_edge(src_node, dst_node, latency)\r\n# @params:\r\n# src_node : the source node object of the link\r\n# dst_node : the destination node object of the link\r\n# latency : the latency measured between the 
two nodes\r\ndef update_edge(src_node, dst_node, latency):\r\n if src_node.ip == dst_node.ip:\r\n return\r\n\r\n if src_node.ip > dst_node.ip:\r\n tmp_node = src_node\r\n src_node = dst_node\r\n dst_node = tmp_node\r\n\r\n link_is_intra = (src_node.network.isp.ASNumber == dst_node.network.isp.ASNumber)\r\n try:\r\n link = Edge.objects.get(src=src_node, dst=dst_node)\r\n except:\r\n link = Edge(src=src_node, dst=dst_node, isIntra=link_is_intra)\r\n link.save()\r\n\r\n # Add peering link if necessary\r\n if not link_is_intra:\r\n update_peering(src_node.network.isp, dst_node.network.isp)\r\n\r\n # Add network_edge if necessary\r\n if src_node.network.id != dst_node.network.id:\r\n update_net_edge(src_node.network, dst_node.network, link_is_intra)\r\n\r\n link_latency = Latency(latency=latency, timestamp=timezone.now())\r\n link_latency.save()\r\n\r\n link.latencies.add(link_latency)\r\n link.save()\r\n\r\n### add_hop(hop, hop_id, session)\r\n# @description: add the current node object as a hop in the session\r\n# @params:\r\n# hop : the node object of current hop\r\n# hop_id : The hop id of the current node in the session\r\n# session : The session the hop is on.\r\ndef add_hop(hop, hop_id, session):\r\n ## Add hop of current node\r\n try:\r\n cur_hop = Hop.objects.get(node=hop, hopID=hop_id, session=session)\r\n except:\r\n cur_hop = Hop(node=hop, hopID=hop_id, session=session)\r\n cur_hop.save()\r\n\r\n### add_subnet(node_net, net_id, session)\r\n# @description: add the current network object as a subnet in the session\r\n# @params:\r\n# node_net : the network object\r\n# net_id : the sequence number of the network in the session\r\n# session : The session the network is on.\r\ndef add_subnet(node_net, net_id, session):\r\n try:\r\n cur_net = Subnetwork.objects.get(network=node_net, netID=net_id, session=session)\r\n except:\r\n cur_net = Subnetwork(network=node_net, netID=net_id, session=session)\r\n cur_net.save()\r\n\r\n### @function add_route(route)\r\n# @params:\r\n# route : a json object of a traceroute session info\r\n# The key denotes the hop number. 
0 denotes the client and the maximum key denotes the server\r\n# Each value object contains info {\"ip\": hop_ip_x.x.x.x, \"name\": hop_hostname, \"time\": time_to_get_to_the_hop}\r\ndef add_route(route):\r\n hop_ids = sorted(route.keys(), key=int)\r\n client = route[hop_ids[0]]\r\n server = route[hop_ids[-1]]\r\n if (client['ip'] == \"*\") or (server[\"ip\"] == \"*\"):\r\n return\r\n\r\n client_node = add_node(client[\"ip\"], \"client\", client[\"name\"], \"access\")\r\n server_node = add_node(server[\"ip\"], \"server\", server[\"name\"], \"cloud\")\r\n\r\n session = add_session(client_node, server_node)\r\n sub_net_id = 0\r\n add_hop(client_node, int(hop_ids[0]), session)\r\n add_subnet(client_node.network, sub_net_id, session)\r\n\r\n pre_node = client_node\r\n pre_time = client[\"time\"]\r\n for hop_id in hop_ids[1:-1]:\r\n cur_hop = route[hop_id]\r\n if (cur_hop[\"ip\"] == \"*\") or (is_reserved(cur_hop[\"ip\"])):\r\n cur_node = add_private_node(pre_node)\r\n add_hop(cur_node, hop_id, session)\r\n pre_node = cur_node\r\n continue\r\n\r\n cur_node = add_node(cur_hop[\"ip\"])\r\n cur_time = cur_hop[\"time\"]\r\n\r\n latency = cur_time - pre_time\r\n if latency < 0:\r\n latency = 0\r\n update_edge(pre_node, cur_node, latency)\r\n add_hop(cur_node, hop_id, session)\r\n if cur_node.network.id != pre_node.network.id:\r\n sub_net_id += 1\r\n add_subnet(cur_node.network, sub_net_id, session)\r\n\r\n pre_node = cur_node\r\n pre_time = cur_time\r\n\r\n latency = server[\"time\"] - pre_time\r\n if latency < 0:\r\n latency = 0\r\n update_edge(pre_node, server_node, latency)\r\n add_hop(server_node, int(hop_ids[-1]), session)\r\n\r\n if server_node.network.id != pre_node.network.id:\r\n sub_net_id += 1\r\n add_subnet(server_node.network, sub_net_id, session)\r\n\r\n### @function add_session(client, server)\r\n# @params:\r\n# client : the client node object of the session\r\n# server : the server node object of the session\r\ndef add_session(client, server):\r\n try:\r\n session = Session.objects.get(client=client, server=server)\r\n except:\r\n session = Session(client=client, server=server)\r\n session.save()\r\n return session\r\n\r\n\r\n### @function get_agent(obj, agentType)\r\n# @params:\r\n# obj : the server/network to probe\r\n# agentType : the type of agent to obtain\r\n#\r\ndef get_agent(obj, agentType):\r\n agents = Agent.objects.filter(agentType=agentType).all()\r\n\r\n obj_type = obj.get_class_name()\r\n if obj_type == \"server\":\r\n obj_lat = obj.node.network.latitude\r\n obj_lon = obj.node.network.longitude\r\n # By default, the else denotes the \"network\" case\r\n else:\r\n obj_lat = obj.latitude\r\n obj_lon = obj.longitude\r\n\r\n if agents.count() > 0:\r\n obj_agent = agents[0]\r\n min_dist = get_distance(obj_lat, obj_lon, obj_agent.node.network.latitude, obj_agent.node.network.longitude)\r\n for agent in agents[1:]:\r\n cur_dist = get_distance(obj_lat, obj_lon, agent.node.network.latitude, agent.node.network.longitude)\r\n if cur_dist < min_dist:\r\n obj_agent = agent\r\n min_dist = cur_dist\r\n elif cur_dist == min_dist:\r\n if obj_type == \"server\":\r\n obj_agent_cnt = obj_agent.servers.count()\r\n cur_agent_cnt = agent.servers.count()\r\n else:\r\n obj_agent_cnt = obj_agent.networks.count()\r\n cur_agent_cnt = agent.networks.count()\r\n if obj_agent_cnt > cur_agent_cnt:\r\n obj_agent = agent\r\n min_dist = cur_dist\r\n\r\n return obj_agent\r\n else:\r\n return None\r\n\r\n## @function get_distance(lat1, lon1, lat2, lon2)\r\n# @params:\r\n# lat1, lon1 : the latitude and 
longitude of the first object\r\n# lat2, lon2 : the latitude and longitude of the second object\r\n# @return: dist ---- the approximate geographical distance (Euclidean distance in lat/lon degrees)\r\ndef get_distance(lat1, lon1, lat2, lon2):\r\n dist = math.sqrt((lat2 - lat1)**2 + (lon2 - lon1)**2)\r\n return dist\r\n\r\n## @function probe_networks()\r\n# @description: get probing agents for all networks.\r\ndef probe_networks():\r\n NetProbing.objects.all().delete()\r\n nets = Network.objects.all()\r\n agent_typs = [\"planetlab\", \"azure\"]\r\n\r\n for net in nets:\r\n for agentTyp in agent_typs:\r\n cur_agent = get_agent(net, agentTyp)\r\n if cur_agent:\r\n netProbe = NetProbing(network=net, agent=cur_agent)\r\n netProbe.save()\r\n\r\n## @function probe_servers()\r\n# @description: get probing agents for all servers.\r\ndef probe_servers():\r\n ServerProbing.objects.all().delete()\r\n\r\n servers = Server.objects.all()\r\n agent_typs = [\"planetlab\", \"azure\"]\r\n\r\n for srv in servers:\r\n for agentTyp in agent_typs:\r\n cur_agent = get_agent(srv, agentTyp)\r\n if cur_agent:\r\n srvProbe = ServerProbing(server=srv, agent=cur_agent)\r\n srvProbe.save()\r\n\r\n## @function merge_networks(network, new_network)\r\n# @description: merge the network info to the new_network\r\ndef merge_networks(network, new_network):\r\n if new_network.id != network.id:\r\n logger.info(\"Network id changes after revision\")\r\n all_net_edges = NetEdge.objects.filter(Q(srcNet=network) | Q(dstNet=network))\r\n for net_edge in all_net_edges.distinct():\r\n if net_edge.srcNet.id == network.id:\r\n try:\r\n new_net_edge = NetEdge.objects.get(srcNet=new_network, dstNet=net_edge.dstNet)\r\n except:\r\n new_net_edge = NetEdge(srcNet=new_network, dstNet=net_edge.dstNet)\r\n new_net_edge.save()\r\n else:\r\n try:\r\n new_net_edge = NetEdge.objects.get(srcNet=net_edge.srcNet, dstNet=new_network)\r\n except:\r\n new_net_edge = NetEdge(srcNet=net_edge.srcNet, dstNet=new_network)\r\n new_net_edge.save()\r\n logger.info(\"Finish updating all network edges\")\r\n\r\n if new_network.isp != network.isp:\r\n all_isp_edges = PeeringEdge.objects.filter(Q(srcISP=network.isp)|Q(dstISP=network.isp))\r\n for peering in all_isp_edges.distinct():\r\n if peering.srcISP == network.isp:\r\n try:\r\n new_peering = PeeringEdge.objects.get(srcISP=new_network.isp, dstISP=peering.dstISP)\r\n except:\r\n new_peering = PeeringEdge(srcISP=new_network.isp, dstISP=peering.dstISP)\r\n new_peering.save()\r\n else:\r\n try:\r\n new_peering = PeeringEdge.objects.get(srcISP=peering.srcISP, dstISP=new_network.isp)\r\n except:\r\n new_peering = PeeringEdge(srcISP=peering.srcISP, dstISP=new_network.isp)\r\n new_peering.save()\r\n logger.info(\"Finish updating all ISP peering relationships!\")\r\n\r\n ## Merge all fields when new_network is not the original network\r\n for node in network.nodes.distinct():\r\n node.network = new_network\r\n node.save()\r\n if node not in new_network.nodes.all():\r\n new_network.nodes.add(node)\r\n\r\n #for nd in new_network.nodes.all():\r\n # print(nd.__str__())\r\n new_network.save()\r\n logger.info(\"Finish merging nodes!\")\r\n\r\n ## Merge sessions\r\n for session in network.related_sessions.all().distinct():\r\n if session not in new_network.related_sessions.all():\r\n preSubnets = Subnetwork.objects.filter(session=session, network=network).distinct()\r\n for preNet in preSubnets:\r\n subnet = Subnetwork(session=session, network=new_network, netID=preNet.netID)\r\n subnet.save()\r\n preNet.delete()\r\n logger.info(\"Finish merging sessions!\")\r\n\r\n ## Merge 
agents\r\n for agent in network.agents.all():\r\n if agent not in new_network.agents.all():\r\n curNetProbing = NetProbing(network=new_network, agent=agent)\r\n curNetProbing.save()\r\n logger.info(\"Finish merging agents!\")\r\n\r\n preLats = list(network.latencies.all())\r\n new_network.latencies.add(*preLats)\r\n\r\n # for lat in new_network.latencies.all():\r\n # print(lat.__str__())\r\n\r\n network.delete()\r\n return new_network\r\n\r\n# @descr: Prepare the json data to scatter the # of QoE anomalies over various properties of ISP and networks.\r\ndef get_scatter_origin_anomalies_json():\r\n logger.info(\"Running get_scatter_origin_anomalies_json\")\r\n # all_origin_stats_dict = getQoEAnomaliesStats()\r\n all_origin_stats_dict = getAnomaliesPerSessions()\r\n\r\n scatter_origin_json = {}\r\n for origin_type in all_origin_stats_dict.keys():\r\n if origin_type not in scatter_origin_json.keys():\r\n scatter_origin_json[origin_type] = {}\r\n for severity in all_origin_stats_dict[origin_type].keys():\r\n ## Ignore the origin field\r\n if severity == \"origin\":\r\n continue\r\n\r\n if severity not in scatter_origin_json[origin_type].keys():\r\n scatter_origin_json[origin_type][severity] = []\r\n cur_anomaly_cnts = all_origin_stats_dict[origin_type][severity]\r\n origins = all_origin_stats_dict[origin_type][\"origin\"]\r\n if \"ISP\" in origin_type:\r\n for i, origin in enumerate(origins):\r\n # print(\"Processing ISP with AS number : \" + origin)\r\n try:\r\n isp = ISP.objects.get(ASNumber=origin)\r\n # print(\"Obtained ISP with AS number : \" + origin)\r\n anomalyCnt = cur_anomaly_cnts[i][\"y\"]\r\n if anomalyCnt > 0:\r\n # print(anomalyCnt)\r\n scatter_origin_json[origin_type][severity].append({\"as\":isp.ASNumber, \"isp\":isp.name, \"geoCoverage\":isp.get_geo_coverage(),\r\n \"peers\": len(isp.get_peers()), \"size\":isp.get_node_size(), \"span\":isp.get_max_span(), \"count\":anomalyCnt})\r\n except:\r\n logger.info(\"Unexpected error:\" + str(sys.exc_info()[0]))\r\n logger.info(\"ISP AS \" + origin + \" was not monitored. The origin is with label: \" +\r\n cur_anomaly_cnts[i][\"label\"])\r\n continue\r\n elif \"Net\" in origin_type:\r\n for i, origin in enumerate(origins):\r\n netAS, lat, lon = origin.split(\",\")\r\n # print(\"Processing network with AS: \" + netAS + \" and location at (\" + lat + \",\" + lon + \")\")\r\n try:\r\n isp = ISP.objects.get(ASNumber=netAS)\r\n # print(\"Get ISP object with AS: \" + netAS)\r\n net = Network.objects.get(isp=isp, latitude=lat, longitude=lon)\r\n # print(\"Obtained network object with AS: \" + netAS + \" at (\" + lat + \",\" + lon + \")\")\r\n azLatMn, azLatStd = get_lat_stat(net.latencies.filter(agent__agentType=\"azure\"))\r\n plLatMn, plLatStd = get_lat_stat(net.latencies.filter(agent__agentType=\"planetlab\"))\r\n\r\n anomalyCnt = cur_anomaly_cnts[i][\"y\"]\r\n # print(anomalyCnt)\r\n if anomalyCnt > 0:\r\n scatter_origin_json[origin_type][severity].append(\r\n {\"as\": net.isp.ASNumber, \"isp\": net.isp.name, \"name\": net.__str__(), \"city\":net.city, \"region\":net.region,\r\n \"country\":net.country, \"size\": net.get_nodes_num(), \"span\":net.get_max_size(),\r\n \"azMean\":azLatMn, \"azStd\":azLatStd, \"plMean\":plLatMn, \"plStd\":plLatStd, \"count\":anomalyCnt})\r\n except:\r\n logger.info(\"Unexpected error:\" + str(sys.exc_info()[0]))\r\n logger.info(\"Network \" + origin + \" was not monitored. 
The origin is with label: \" +\r\n cur_anomaly_cnts[i][\"label\"])\r\n continue\r\n elif origin_type == \"server\":\r\n for i, origin in enumerate(origins):\r\n # print(\"Processing server with ip : \" + origin)\r\n try:\r\n server = Server.objects.get(node__ip=origin)\r\n # print(\"Obtained server with ip : \" + origin)\r\n azLatMn, azLatStd = get_lat_stat(server.latencies.filter(agent__agentType=\"azure\"))\r\n plLatMn, plLatStd = get_lat_stat(server.latencies.filter(agent__agentType=\"planetlab\"))\r\n\r\n anomalyCnt = cur_anomaly_cnts[i][\"y\"]\r\n #print(anomalyCnt)\r\n if anomalyCnt > 0:\r\n scatter_origin_json[origin_type][severity].append(\r\n {\"ip\": server.node.ip, \"city\": server.node.network.city, \"region\":server.node.network.region, \"country\":server.node.network.country,\r\n \"azMean\":azLatMn, \"azStd\":azLatStd, \"plMean\":plLatMn, \"plStd\":plLatStd, \"count\":anomalyCnt})\r\n except:\r\n logger.info(\"Unexpected error:\" + str(sys.exc_info()[0]))\r\n logger.info(\"Server with IP \" + origin + \" was not monitored. The origin is with label: \" +\r\n cur_anomaly_cnts[i][\"label\"])\r\n continue\r\n else:\r\n continue\r\n\r\n return scatter_origin_json","repo_name":"ephemeral2eternity/monitor-cmu-agens","sub_path":"monitorTopology/monitor_utils.py","file_name":"monitor_utils.py","file_ext":"py","file_size_in_byte":22981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"24900129664","text":"from unittest import TestCase\n\nfrom beautiful_date import Mar\n\nfrom gcsa.free_busy import FreeBusy, TimeRange\nfrom gcsa.serializers.free_busy_serializer import FreeBusySerializer\n\n\nclass TestFreeBusy(TestCase):\n def test_iter(self):\n free_busy = FreeBusy(\n time_min=(24 / Mar / 2023)[13:22],\n time_max=(25 / Mar / 2023)[13:22],\n groups={},\n calendars={\n 'calendar1': [\n TimeRange((24 / Mar / 2023)[14:22], (24 / Mar / 2023)[15:22]),\n TimeRange((24 / Mar / 2023)[17:22], (24 / Mar / 2023)[18:22]),\n ]\n }\n )\n\n ranges = list(free_busy)\n self.assertEqual(len(ranges), 2)\n self.assertEqual(ranges[0], free_busy.calendars['calendar1'][0])\n self.assertEqual(ranges[1], free_busy.calendars['calendar1'][1])\n\n def test_iter_errors(self):\n free_busy = FreeBusy(\n time_min=(24 / Mar / 2023)[13:22],\n time_max=(25 / Mar / 2023)[13:22],\n groups={},\n calendars={\n 'calendar1': [\n TimeRange((24 / Mar / 2023)[14:22], (24 / Mar / 2023)[15:22]),\n TimeRange((24 / Mar / 2023)[17:22], (24 / Mar / 2023)[18:22]),\n ],\n 'calendar2': [\n TimeRange((24 / Mar / 2023)[15:22], (24 / Mar / 2023)[16:22]),\n TimeRange((24 / Mar / 2023)[18:22], (24 / Mar / 2023)[19:22]),\n ]\n }\n )\n\n with self.assertRaises(ValueError):\n iter(free_busy)\n\n free_busy = FreeBusy(\n time_min=(24 / Mar / 2023)[13:22],\n time_max=(25 / Mar / 2023)[13:22],\n groups={},\n calendars={\n 'calendar1': [\n TimeRange((24 / Mar / 2023)[14:22], (24 / Mar / 2023)[15:22]),\n TimeRange((24 / Mar / 2023)[17:22], (24 / Mar / 2023)[18:22]),\n ]\n },\n calendars_errors={\n 'calendar2': ['notFound']\n }\n )\n with self.assertRaises(ValueError):\n iter(free_busy)\n\n free_busy = FreeBusy(\n time_min=(24 / Mar / 2023)[13:22],\n time_max=(25 / Mar / 2023)[13:22],\n groups={},\n calendars={},\n calendars_errors={\n 'calendar1': ['notFound']\n }\n )\n with self.assertRaises(ValueError):\n iter(free_busy)\n\n def test_repr_str(self):\n free_busy = FreeBusy(\n time_min=(24 / Mar / 2023)[13:22],\n time_max=(25 / Mar / 2023)[13:22],\n groups={'group1': ['calendar1', 
'calendar2']},\n calendars={\n 'calendar1': [\n TimeRange((24 / Mar / 2023)[14:22], (24 / Mar / 2023)[15:22]),\n TimeRange((24 / Mar / 2023)[17:22], (24 / Mar / 2023)[18:22]),\n ],\n 'calendar2': [\n TimeRange((24 / Mar / 2023)[15:22], (24 / Mar / 2023)[16:22]),\n TimeRange((24 / Mar / 2023)[18:22], (24 / Mar / 2023)[19:22]),\n ]\n }\n )\n self.assertEqual(free_busy.__repr__(), \"\")\n self.assertEqual(free_busy.__str__(), \"\")\n\n\nclass TestFreeBusySerializer(TestCase):\n def test_to_json(self):\n free_busy = FreeBusy(\n time_min=(24 / Mar / 2023)[13:22],\n time_max=(25 / Mar / 2023)[13:22],\n groups={'group1': ['calendar1', 'calendar2']},\n calendars={\n 'calendar1': [\n TimeRange((24 / Mar / 2023)[14:22], (24 / Mar / 2023)[15:22]),\n TimeRange((24 / Mar / 2023)[17:22], (24 / Mar / 2023)[18:22]),\n ],\n 'calendar2': [\n TimeRange((24 / Mar / 2023)[15:22], (24 / Mar / 2023)[16:22]),\n TimeRange((24 / Mar / 2023)[18:22], (24 / Mar / 2023)[19:22]),\n ]\n },\n groups_errors={\n \"non-existing-group\": [\n {\n \"domain\": \"global\",\n \"reason\": \"notFound\"\n }\n ]\n },\n calendars_errors={\n \"non-existing-calendar\": [\n {\n \"domain\": \"global\",\n \"reason\": \"notFound\"\n }\n ]\n }\n )\n\n free_busy_json = FreeBusySerializer.to_json(free_busy)\n self.assertEqual(free_busy_json['timeMin'], '2023-03-24T13:22:00')\n self.assertEqual(free_busy_json['timeMax'], '2023-03-25T13:22:00')\n self.assertIn('calendar1', free_busy_json['calendars'])\n self.assertIn('calendar2', free_busy_json['calendars'])\n self.assertIn('non-existing-calendar', free_busy_json['calendars'])\n self.assertIn('group1', free_busy_json['groups'])\n self.assertIn('non-existing-group', free_busy_json['groups'])\n\n def test_to_object(self):\n free_busy_json = {\n 'calendars': {\n 'calendar1': {\n 'busy': [{'start': '2023-03-24T14:22:00', 'end': '2023-03-24T15:22:00'},\n {'start': '2023-03-24T17:22:00', 'end': '2023-03-24T18:22:00'}],\n },\n 'calendar2': {\n 'busy': [{'start': '2023-03-24T15:22:00', 'end': '2023-03-24T16:22:00'}],\n },\n 'non-existing-calendar': {\n 'errors': [{'domain': 'global', 'reason': 'notFound'}]\n }\n },\n 'groups': {\n 'group1': {\n 'calendars': ['calendar1', 'calendar2'],\n },\n 'non-existing-group': {\n 'errors': [{'domain': 'global', 'reason': 'notFound'}]\n }\n },\n 'timeMin': '2023-03-24T13:22:00',\n 'timeMax': '2023-03-25T13:22:00'\n }\n\n free_busy = FreeBusySerializer.to_object(free_busy_json)\n\n self.assertEqual(free_busy.time_min, (24 / Mar / 2023)[13:22])\n self.assertEqual(free_busy.time_max, (25 / Mar / 2023)[13:22])\n\n self.assertIn('calendar1', free_busy.calendars)\n self.assertIn('calendar2', free_busy.calendars)\n self.assertNotIn('calendar1', free_busy.calendars_errors)\n self.assertNotIn('calendar2', free_busy.calendars_errors)\n self.assertEqual(len(free_busy.calendars['calendar1']), 2)\n self.assertEqual(len(free_busy.calendars['calendar2']), 1)\n self.assertNotIn('non-existing-calendar', free_busy.calendars)\n self.assertIn('non-existing-calendar', free_busy.calendars_errors)\n\n self.assertIn('group1', free_busy.groups)\n self.assertNotIn('group1', free_busy.groups_errors)\n self.assertEqual(len(free_busy.groups['group1']), 2)\n self.assertIn('non-existing-group', free_busy.groups_errors)\n self.assertNotIn('non-existing-group', free_busy.groups)\n\n free_busy_json = \"\"\"{\n \"timeMin\": \"2023-03-24T13:22:00\",\n \"timeMax\": \"2023-03-25T13:22:00\"\n }\"\"\"\n\n free_busy = FreeBusySerializer(free_busy_json).to_object(free_busy_json)\n 
self.assertEqual(free_busy.time_min, (24 / Mar / 2023)[13:22])\n self.assertEqual(free_busy.time_max, (25 / Mar / 2023)[13:22])\n","repo_name":"kuzmoyev/google-calendar-simple-api","sub_path":"tests/test_free_busy.py","file_name":"test_free_busy.py","file_ext":"py","file_size_in_byte":7559,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"81"} +{"seq_id":"39719302174","text":"\"\"\"\nGiven an array of integers A of size N.\nA triplet (i, j, k), i <= j <= k is called a power triplet if A[i] ^ A[i+1] ^....A[j-1] = A[j] ^.....^A[k].\nWhere, ^ denotes bitwise xor.\nReturn the count of all possible power triplets. Since the answer could be large return answer % (10**9 + 7).\n\"\"\"\n# Question was part of tries, but solved it using prefix XOR and a dict (hashmap).\n# If two equal values appear in the prefix XOR array, all the terms between them satisfy the above property.\n\nclass Solution:\n # add to xordict\n def adddict(self, key, val, tempdict):\n if key in tempdict:\n tempdict[key].append(val)\n else:\n tempdict[key] = [val]\n \n # calculate the number of elements between any two equal elements\n def between(self, templist):\n if len(templist) < 2: return 0\n tempans = 0\n for i in range(len(templist)):\n for j in range(i+1, len(templist)):\n tempans = (tempans + (templist[j]-templist[i]-1))%(10**9+7)\n return tempans%(10**9+7) \n\n # param A = input array\n def solve(self, A):\n temp = 0\n # maintain a dict\n xordict = dict()\n xordict[0] = [0]\n # calculate the prefix XOR and populate the dict at the same time\n for i, val in enumerate(A):\n temp ^= val\n A[i] = temp\n self.adddict(temp, i+1, xordict)\n A.insert(0, 0)\n ans = 0\n for valList in xordict.values():\n ans = (ans + self.between(valList))%(10**9+7)\n return ans%(10**9+7)\n \n","repo_name":"anurag5398/DSA-Problems","sub_path":"Misc/XORtriplets.py","file_name":"XORtriplets.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70850774986","text":"import tensorflow as tf\nimport numpy as np\n\n# load fashion mnist dataset from tensorflow dataset library\nfashion_mnist = tf.keras.datasets.fashion_mnist\n# split data into training and test sets\n(training_images, training_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n# build neural network model with 3 layers \n# and define input shape to be 28 x 28\n# use softmax activation function to output class probabilities\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28,28)),\n tf.keras.layers.Dense(units=512, activation = tf.nn.relu),\n tf.keras.layers.Dense(units=512, activation = tf.nn.relu),\n tf.keras.layers.Dense(units=10, activation=tf.nn.softmax)\n])\n\n# normalize grayscale images\ntraining_images = training_images / 255.0\ntest_images = test_images / 255.0\n\n# choose optimizer, loss and metrics\nmodel.compile(optimizer=\"adam\",\n loss=\"sparse_categorical_crossentropy\",\n metrics=[\"accuracy\"])\n\n# train model with given training dataset and number of epochs\nmodel.fit(training_images, training_labels, epochs=7)\n\nprint(\"Evaluation Results\")\n# evaluate model on test set\nmodel.evaluate(test_images, test_labels)\n\n# make predictions\nclassifications = model.predict(test_images)\n\n# print and compare prediction results with actual labels\nprint(type(classifications))\nprint(classifications.shape)\n\nprint(\"Model Prediction Probabilities for Image 1, 2, 
3\")\nprint(classifications[0])\nprint(classifications[1])\nprint(classifications[2])\n\nprint(\"Max Prediction Probablities for Image 1, 2, 3\")\n\nprint(np.argmax(classifications[0]))\nprint(np.argmax(classifications[1]))\nprint(np.argmax(classifications[2]))\n\nprint(\"Expected Labels for Image 1, 2, 3\")\nprint(test_labels[0])\nprint(test_labels[1])\nprint(test_labels[2])\n\nclass myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n if(logs.get(\"accuracy\") > 0.88 ):\n self.model.stop_training = True\n\n\n\n# build neural network model with 3 layers \n# and define input shape to be 28 x 28\n# use softmax activation function to output class probabilities\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28,28)),\n tf.keras.layers.Dense(units=512, activation = tf.nn.relu),\n tf.keras.layers.Dense(units=512, activation = tf.nn.relu),\n tf.keras.layers.Dense(units=10, activation=tf.nn.softmax)\n])\n\n# choose optimizer, loss and metrics\nmodel.compile(optimizer = tf.keras.optimizers.Adam(),\n loss = 'sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n# create a callback instance from myCallback class\ncallback = myCallback()\nmodel.fit(training_images, training_labels, epochs=7, callbacks=[callback])\n\nprint(\"Evaluation Results\")\n# evaluate model on test set\nmodel.evaluate(test_images, test_labels)","repo_name":"VahapML/Tiny-Machine-Learning","sub_path":"6_exploring_dnn.py","file_name":"6_exploring_dnn.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22219211102","text":"from tkinter import * #티킨터 모듈에 있는 함수를 사용\n\n\"\"\"기본 프레임 설정\"\"\"\nroot = Tk() # tk창 생성\nroot.title(\"JSY GUI\") # 제목 설정\nroot.geometry(\"640x480\")\n\n\"\"\"리스트 박스\"\"\"\n# mode: single도 있다.\n# height = 0은 모든 요소를 보요줄 만큼의 크기 설정\nlistbox = Listbox(root, selectmode=\"extended\", height=0)\nlistbox.insert(0, \"사과\")\nlistbox.insert(1, \"딸기\")\nlistbox.insert(2, \"바나나\")\nlistbox.insert(END, \"수박\")\nlistbox.insert(END, \"포도\")\nlistbox.pack()\n\n\n#리스트 박스 응용\ndef btncmd():\n \"\"\"삭제\"\"\"\n listbox.delete(END) # 맨 뒤에 항목을 삭제\n listbox.delete(0) # 맨 앞의 항목을 삭제\n\n \"\"\"개수 확인\"\"\"\n print(\"리스트에는\", listbox.size(), \"개가 있습니다.\")\n\n \"\"\"항목 확인\"\"\"\n print(\"1~3번째 ��목:\", listbox.get(0, 2))\n \n \"\"\"선택된 항목 확인(인덱스 위치로 반환됨)\"\"\"\n print(\"선택된 항목:\", listbox.curselection())\n\nbtn = Button(root, text=\"클릭\", command=btncmd)\nbtn.pack()\n\n\nroot.mainloop() #창이 닫히지 않도록 하는 명령어","repo_name":"JoSangYeon/Python_Tkinter_Project","sub_path":"widget/5_listbox.py","file_name":"5_listbox.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19225781793","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, Http404\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .models import Team,Match,Scored\n\nfrom .scoreutils import get_scores\nfrom .twitter_functions import get_tweets\n\nfrom datetime import datetime, date\nfrom pytz import timezone\n\nfrom .forms import search_team_form\n\n\ndef index(request):\n tz = timezone('America/New_York')\n right_now = datetime.now(tz)\n today_date = date(right_now.year, right_now.month, right_now.day)\n\n today_matches = Match.objects.filter(date = today_date)\n return render(request, 'index.html',{'match_query':today_matches})\n\n\ndef history(request):\n return render(request, 
'history.html')\n\n\ndef contact(request):\n return render(request, 'contact.html')\n\n\ndef scores(request):\n score_list = get_scores()\n score_list1 = []\n if score_list != None:\n for item in score_list:\n lteam = Team.objects.get(pk = item['leftplayer'])\n rteam = Team.objects.get(pk = item['rightplayer'])\n\n curr_match = Match.objects.get(LeftTeam = lteam, RightTeam = rteam, date = datetime.now().date())\n\n scored = Scored.objects.get(match = curr_match)\n scores_dict = {\n 'lteam': lteam,\n 'rteam': rteam,\n 'curr_match': curr_match,\n 'score': scored,\n 'minute': item['minute']\n }\n\n score_list1.append(scores_dict)\n return render(request, 'scores.html', {'score_list':score_list1 })\n\ndef twitterfeed(request):\n tweetList = get_tweets('world cup')\n #return HttpResponse(str(tweetList))\n return render(request, 'twitter.html', {'tweet_list': tweetList})\n\ndef search_team(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = search_team_form(request.POST)\n # check whether it's valid:\n if form.is_valid():\n team = form.cleaned_data['team']\n team_query = None\n try:\n team_query = Team.objects.get(pk = str(team))\n except ObjectDoesNotExist:\n raise Http404(\"Team is not playing in the World Cup\")\n\n return render(request, 'team.html', {'team_query': team_query})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = search_team_form()\n\n return render(request, 'team_search.html', {'form': form})\n","repo_name":"carlaoutput/WorldCupPy","sub_path":"worldcup/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"1787520545","text":"import pytest\nfrom cromwell_tools.cli import parser as cli_parser\nimport tempfile\nimport os\nimport six\nimport json\nfrom cromwell_tools.cromwell_auth import CromwellAuth\n\n\nsix.add_move(six.MovedModule('mock', 'mock', 'unittest.mock'))\nfrom six.moves import mock # noqa\n\n\n@pytest.fixture(scope=\"module\")\ndef username_password_auth():\n return [\n \"--username\",\n \"fake-user\",\n \"--password\",\n \"fake-pwd\",\n \"--url\",\n \"https://fake-cromwell\",\n ]\n\n\n@pytest.fixture(scope=\"module\")\ndef no_auth():\n return [\"--url\", \"https://fake-cromwell\"]\n\n\n@pytest.fixture(scope=\"module\")\ndef service_account_auth():\n temp_dir = tempfile.mkdtemp()\n service_account_key = os.path.join(temp_dir, 'fake_key.json')\n fake_svc_info = {\"token_uri\": \"foo\", \"client_email\": \"bar\", \"private_key\": \"baz\"}\n with open(service_account_key, 'w') as f:\n json.dump(fake_svc_info, f)\n return [\n \"--service-account-key\",\n service_account_key,\n \"--url\",\n \"https://fake-cromwell\",\n ]\n\n\n@pytest.fixture(scope=\"module\")\ndef secret_file_auth():\n temp_dir = tempfile.mkdtemp()\n secrets_file = os.path.join(temp_dir, 'fake_secrets.json')\n auth_params = {\n \"url\": \"https://fake-cromwell\",\n \"username\": \"fake-user\",\n \"password\": \"fake-pwd\",\n }\n with open(secrets_file, 'w') as f:\n json.dump(auth_params, f)\n return [\"--secrets-file\", secrets_file]\n\n\ndef test_cli_print_version_info():\n \"\"\"Make sure the CLI prints version info properly\"\"\"\n user_inputs = [\"-V\"]\n with pytest.raises(SystemExit) as pytest_wrapped_exit:\n cli_parser(user_inputs)\n assert pytest_wrapped_exit.type == SystemExit\n assert 
pytest_wrapped_exit.value.code == 0\n\n\ndef test_cli_command_raise_value_error_when_no_creds_provided():\n \"\"\"Make sure the CLI raise exception about the auth when no creds provided.\"\"\"\n user_inputs = [\"submit\", \"--wdl-file\", \"fake.wdl\", \"--inputs-files\", \"fake.json\"]\n with pytest.raises(ValueError):\n command, args = cli_parser(user_inputs)\n\n\ndef test_cli_command_works_with_username_password_auth(username_password_auth):\n \"\"\"Use the submit command as an example to prove CLI works with u/p auth.\"\"\"\n user_inputs = [\n \"submit\",\n \"--wdl-file\",\n \"fake.wdl\",\n \"--inputs-files\",\n \"fake.json\",\n ] + username_password_auth\n command, args = cli_parser(user_inputs)\n\n\ndef test_cli_command_works_with_no_auth(no_auth):\n \"\"\"Use the submit command as an example to prove CLI works with u/p auth.\"\"\"\n user_inputs = [\n \"submit\",\n \"--wdl-file\",\n \"fake.wdl\",\n \"--inputs-files\",\n \"fake.json\",\n ] + no_auth\n command, args = cli_parser(user_inputs)\n\n\n@mock.patch('cromwell_tools.cromwell_auth.CromwellAuth.from_service_account_key_file')\ndef test_cli_command_works_with_service_account_auth(mock_header, service_account_auth):\n \"\"\"Use the submit command as an example to prove CLI works with u/p auth.\"\"\"\n expected_auth = CromwellAuth(\n url=\"https://fake-cromwell\",\n header={\"Authorization\": \"bearer fake_token\"},\n auth=None,\n )\n mock_header.return_value = expected_auth\n user_inputs = [\n \"submit\",\n \"--wdl-file\",\n \"fake.wdl\",\n \"--inputs-files\",\n \"fake.json\",\n ] + service_account_auth\n command, args = cli_parser(user_inputs)\n\n\ndef test_cli_command_works_with_secrets_file_auth(secret_file_auth):\n \"\"\"Use the submit command as an example to prove CLI works with u/p auth.\"\"\"\n user_inputs = [\n \"submit\",\n \"--wdl-file\",\n \"fake.wdl\",\n \"--inputs-files\",\n \"fake.json\",\n ] + secret_file_auth\n command, args = cli_parser(user_inputs)\n\n\ndef test_cli_submit_command(no_auth):\n \"\"\"Test the submit command (with no-auth for simplicity).\"\"\"\n user_inputs = [\n \"submit\",\n \"--wdl-file\",\n \"fake.wdl\",\n \"--inputs-files\",\n \"fake.json\",\n ] + no_auth\n command, args = cli_parser(user_inputs)\n assert command.__name__ == \"submit\"\n assert args['wdl_file'] == \"fake.wdl\"\n assert \"fake.json\" in args['inputs_files']\n\n\ndef test_cli_wait_command(no_auth):\n \"\"\"Test the wait command (with no-auth for simplicity).\"\"\"\n user_inputs = [\n \"wait\",\n \"--poll-interval-seconds\",\n \"10\",\n \"00000000-0000-0000-0000-000000000000\",\n \"00000000-0000-0000-0000-000000000000\",\n ] + no_auth\n command, args = cli_parser(user_inputs)\n assert command.__name__ == \"wait\"\n assert \"00000000-0000-0000-0000-000000000000\" in args[\"workflow_ids\"]\n\n\ndef test_cli_status_command(no_auth):\n \"\"\"Test the status command (with no-auth for simplicity).\"\"\"\n user_inputs = [\"status\", \"--uuid\", \"00000000-0000-0000-0000-000000000000\"] + no_auth\n command, args = cli_parser(user_inputs)\n assert command.__name__ == \"status\"\n assert args[\"uuid\"] == \"00000000-0000-0000-0000-000000000000\"\n\n\ndef test_cli_abort_command(no_auth):\n \"\"\"Test the abort command (with no-auth for simplicity).\"\"\"\n user_inputs = [\"abort\", \"--uuid\", \"00000000-0000-0000-0000-000000000000\"] + no_auth\n command, args = cli_parser(user_inputs)\n assert command.__name__ == \"abort\"\n assert args[\"uuid\"] == \"00000000-0000-0000-0000-000000000000\"\n\n\ndef test_cli_release_hold_command(no_auth):\n 
\"\"\"Test the release hold command (with no-auth for simplicity).\"\"\"\n user_inputs = [\n \"release_hold\",\n \"--uuid\",\n \"00000000-0000-0000-0000-000000000000\",\n ] + no_auth\n command, args = cli_parser(user_inputs)\n assert command.__name__ == \"release_hold\"\n assert args[\"uuid\"] == \"00000000-0000-0000-0000-000000000000\"\n\n\ndef test_cli_metadata_command(no_auth):\n \"\"\"Test the metadata command (with no-auth for simplicity).\"\"\"\n user_inputs = [\n \"metadata\",\n \"--uuid\",\n \"00000000-0000-0000-0000-000000000000\",\n \"--includeKey\",\n \"jobId\",\n ] + no_auth\n command, args = cli_parser(user_inputs)\n assert command.__name__ == \"metadata\"\n assert args[\"uuid\"] == \"00000000-0000-0000-0000-000000000000\"\n assert \"jobId\" in args[\"includeKey\"]\n\n\ndef test_cli_query_command(no_auth):\n \"\"\"Test the query command (with no-auth for simplicity).\"\"\"\n # Not Implemented yet\n assert True\n\n\ndef test_cli_health_command(no_auth):\n \"\"\"Test the health command (with no-auth for simplicity).\"\"\"\n user_inputs = [\"health\"] + no_auth\n command, args = cli_parser(user_inputs)\n assert command.__name__ == \"health\"\n\n\ndef test_cli_task_runtime_command(no_auth):\n \"\"\"Test the task_runtime command (with no-auth for simplicity).\"\"\"\n user_inputs = [\n \"task_runtime\",\n \"--uuid\",\n \"00000000-0000-0000-0000-000000000000\",\n ] + no_auth\n command, args = cli_parser(user_inputs)\n assert command.__name__ == \"run\" # task_runtime's entrypoint is run()\n assert args[\"uuid\"] == \"00000000-0000-0000-0000-000000000000\"\n","repo_name":"broadinstitute/cromwell-tools","sub_path":"cromwell_tools/tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"81"} +{"seq_id":"9304128537","text":"def calc_program(intcode, noun, verb):\n\tintcode = list(intcode)\n\tintcode[1], intcode[2] = noun, verb \n\tsupported_opcodes = set([1, 2, 99])\n\n\ti = 0\n\twhile i < len(intcode) - 3:\n\t\topcode = intcode[i]\n\t\tif opcode in supported_opcodes:\n\t\t\ta, b, = intcode[intcode[i + 1]], intcode[intcode[i + 2]]\n\t\t\tif opcode == 1:\n\t\t\t\tintcode[intcode[i + 3]] = a + b\n\t\t\telif opcode == 2:\n\t\t\t\tintcode[intcode[i + 3]] = a * b\n\t\t\telse:\n\t\t\t\tbreak\n\t\ti += 4\n\treturn intcode[0]\n \n# part 1\nintcode = [int(o) for o in open('input.txt', 'r').readline().split(',')]\nprint(calc_program(intcode, 12, 2))\n\n# part 2\nnoun = verb = None\nfor i in range(100):\n\tfor j in range(100):\n\t\tres = calc_program(intcode, i, j)\n\t\tif res == 19690720:\n\t\t\tprint(100 * i + j)\n","repo_name":"r-tran/advent-of-code","sub_path":"aoc-2019/day2/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9894739385","text":"import datetime\n\nfrom conf import JWT_ACCESS_TOKEN_EXP, JWT_REFRESH_TOKEN_EXP\n\n\ndef experied_at(access_token=True):\n if access_token:\n exp_seconds = JWT_ACCESS_TOKEN_EXP\n else:\n exp_seconds = JWT_REFRESH_TOKEN_EXP\n\n now = datetime.datetime.utcnow()\n delta = datetime.timedelta(seconds=exp_seconds)\n return now + delta\n","repo_name":"fr33mang/aiohttp_hello_world","sub_path":"app/authorization/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"36191264866","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nfrom sys import argv\r\nimport xml.etree.ElementTree as xml\r\n\r\nif (len(argv) == 1):\r\n print('Не было передано аргументов!')\r\n exit\r\n\r\nsourcePath = argv[1][7::]\r\ndestinationPath = argv[2][8::]\r\n\r\nif (not os.path.isfile(sourcePath)):\r\n print(\"Не удалось открыть файл-источник, такого файла нет!\")\r\n exit()\r\n\r\nelse:\r\n source = open(sourcePath, 'r')\r\n edges = []\r\n for x in source.read().split(sep='), '):\r\n edges.append(x[1:].split(sep=', '))\r\n\r\n edgesAmount = len(edges)\r\n edges[edgesAmount - 1][2] = edges[edgesAmount - 1][2][:-1:]\r\n\r\n for i in range(0, edgesAmount - 1):\r\n edge1 = edges[i]\r\n for edge2 in edges:\r\n if edge1 != edge2:\r\n if edge1[1] == edge2[1] and edge1[2] == edge2[2]:\r\n print(\"Ошибка формата! Строка:\", i + 1)\r\n exit()\r\n\r\n edges.sort(key=lambda i: (i[1], i[2]))\r\n\r\n vertexList = []\r\n for i in range(edgesAmount):\r\n vertexList.append(edges[i][0])\r\n vertexList.append(edges[i][1])\r\n vertexList.sort()\r\n uniqueVertexList = []\r\n for x in vertexList:\r\n if x not in uniqueVertexList:\r\n uniqueVertexList.append(x)\r\n\r\n graph = xml.Element(\"graph\")\r\n\r\n for i in uniqueVertexList:\r\n vertex = xml.SubElement(graph, \"vertex\")\r\n vertex.text = i\r\n\r\n for i in edges:\r\n arc = xml.SubElement(graph, \"arc\")\r\n fromValue = xml.SubElement(arc, \"from\")\r\n fromValue.text = i[0]\r\n toValue = xml.SubElement(arc, \"to\")\r\n toValue.text = i[1]\r\n orderValue = xml.SubElement(arc, \"order\")\r\n orderValue.text = i[2]\r\n\r\n tree = xml.ElementTree(graph)\r\n tree.write(destinationPath)","repo_name":"MyazinAlexander/Neural-networks-1","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10147322394","text":"from django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nimport json\nimport requests\nfrom subprocess import Popen, PIPE, STDOUT\n\n\ndef create_directory():\n # [\"python\",\"easyaslinux/scripts/your_script_name.py\",\"argument_here\" ]\n command = [\"bash\", \"django_script/scripts/test.sh\", \"create\"]\n try:\n process = Popen(command, stdout=PIPE, stderr=STDOUT)\n output = process.stdout.read()\n exitstatus = process.poll()\n if (exitstatus == 0):\n return {\"status\": \"Success\", \"output\": str(output)}\n else:\n return {\"status\": \"Failed\", \"output\": str(output)}\n except Exception as e:\n return {\"status\": \"failed\", \"output\": str(e)}\n\n\ndef delete_directory():\n\n command = [\"bash\", \"django_script/scripts/test.sh\", \"delete\"]\n try:\n process = Popen(command, stdout=PIPE, stderr=STDOUT)\n output = process.stdout.read()\n exitstatus = process.poll()\n if (exitstatus == 0):\n return {\"status\": \"Success\", \"output\": str(output)}\n else:\n return {\"status\": \"Failed\", \"output\": str(output)}\n except Exception as e:\n return {\"status\": \"failed\", \"output\": str(e)}\n\n\n@csrf_exempt\ndef file_maniputer(request):\n\n if request.method == 'POST':\n request_data = json.loads(request.body)\n\n if request_data[\"action\"] == \"create\":\n data = create_directory()\n elif request_data[\"action\"] == \"delete\":\n data = delete_directory()\n else:\n data = {\"status\": \"not defined\", \"output\": \"not defined\"}\n\n response = HttpResponse(json.dumps(\n data), content_type='application/json', status=200)\n return 
response\n","repo_name":"sheucke/django_script","sub_path":"django_script/django_script/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41336901308","text":"# implementation of Huffman Coding algorithm of variable length coding scheme\r\n# Soumyadeep Pal (IT UG3, Roll 002011001113)\r\n\r\n# importing required libraries\r\nimport math\r\nimport heapq\r\nimport csv\r\nimport json\r\nfrom collections import Counter\r\n\r\n# tree node class definition\r\nclass HuffmanNode:\r\n def __init__(self,symbol=None,freq=0,left=None,right=None):\r\n self.symbol=symbol\r\n self.freq=freq\r\n self.left=left\r\n self.right=right\r\n def __lt__(self,other):\r\n return self.freq1:\r\n node1=heapq.heappop(heap)\r\n node2=heapq.heappop(heap)\r\n merged_node=HuffmanNode(freq=node1.freq+node2.freq,left=node1,right=node2)\r\n heapq.heappush(heap,merged_node)\r\n root=heap[0]\r\n code_dict={} \r\n def traverse(node,code):\r\n if node.symbol:\r\n code_dict[node.symbol]=code\r\n return\r\n traverse(node.left,code+\"0\")\r\n traverse(node.right,code+\"1\")\r\n traverse(root,\"\")\r\n symbol_table={symbol:code for symbol,code in code_dict.items()}\r\n encoded_data=\"\".join([code_dict[symbol] for symbol in data])\r\n coding_tree=json.dumps(root,cls=HuffmanEncoder,indent=\" \")\r\n compression_ratio=(1-len(encoded_data)/(maxbits*len(data)))*100\r\n # Calculate the average code length\r\n code_lengths=[len(code_dict[symbol]) for symbol in freq_dict]\r\n avg_code_length=sum(code_lengths[i]*freq_dict[list(freq_dict.keys())[i]] for i in range(len(freq_dict)))/len(data)\r\n # Calculate the entropy of the data\r\n entropy=-sum(freq_dict[symbol]/len(data)*math.log(freq_dict[symbol]/len(data),2) for symbol in freq_dict)\r\n # Calculate the efficiency of the compression\r\n efficiency=(entropy/avg_code_length)*100\r\n return encoded_data,coding_tree,symbol_table,compression_ratio,avg_code_length,entropy,efficiency\r\n\r\n# main\r\nprint('------------------------------------------------------------------------------------------')\r\nmsg=input('Enter message : ')\r\ndata=Counter(msg);\r\ndatarange=(str(data)).count(':')\r\nprint('Number of characters : ',datarange)\r\nprint('Character frequencies : ',(str(data))[8:-1])\r\nmaxbits=math.ceil(math.log(datarange,2))\r\nb0=len(msg)*maxbits\r\nprint('Original message size : ',b0,' bits')\r\nencdata,tree,symboltable,compratio,avgcodelen,entropy,efficiency=huffman_encoding(msg,maxbits)\r\nprint('Encoded message : ',encdata)\r\nb1=len(encdata)\r\nprint('Encoded message size : ',b1,' bits')\r\nprint('Compression ratio : %.2f%%' % compratio)\r\nprint('------------------------------------------------------------------------------------------')\r\nprint('Average code length : %.2f bits/symbol' % avgcodelen)\r\nprint('Entropy : %.2f bits/symbol' % entropy)\r\nprint('Efficiency : %.2f' % efficiency)\r\ntextfile=open(\"HuffmanValues.txt\",\"w\")\r\ntextfile.write('Average code length : %.2f bits/symbol \\n' % avgcodelen)\r\ntextfile.write('Entropy : %.2f bits/symbol \\n' % entropy)\r\ntextfile.write('Efficiency : %.2f' % efficiency)\r\ntextfile.close()\r\nprint('------------------------------------------------------------------------------------------')\r\nprint(\"Symbol Code (Table exported in CSV file)\")\r\ncodetable=[];\r\nfor symbol,code in symboltable.items():\r\n print(f\"{symbol} {code}\")\r\n 
codetable.append([symbol,code])\r\nfields=['Symbol','Code']\r\nwith open('HuffmanSymbolCodeTable.csv','w',newline='') as csvfile:\r\n csvwriter=csv.writer(csvfile)\r\n csvwriter.writerow(fields)\r\n csvwriter.writerows(codetable)\r\nprint('------------------------------------------------------------------------------------------')\r\nprint('Coding Tree (Tree exported in JSON file)')\r\nprint(tree)\r\nwith open('HuffmanCodingTree.json','w') as f:\r\n json.dump(tree,f)\r\nprint('------------------------------------------------------------------------------------------')","repo_name":"realspal/multimedia-lab","sub_path":"Algo-HuffmanCoding/Huffman.py","file_name":"Huffman.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10045995568","text":"import paho.mqtt.client as paho\nimport os\nimport socket\nimport ssl\nimport random as r\nimport string\nimport json\nfrom time import sleep\nimport csv\n#import boto3\n\nconnflag = False\ndef get_random_value():\n return [r.randrange(20,35),r.randrange(85,100)]\n\ndef on_connect(client, userdata, flags, rc): # func for making connection\n global connflag\n print(\"Connected to AWS\")\n connflag = True\n print(\"Connection returned result: \" + str(rc))\n\ndef on_message(client, userdata, msg): # Func for Sending msg\n print(msg.topic+\" \"+str(msg.payload))\n\ndef connect():\n mqttc.tls_set(caPath, certfile=certPath, keyfile=keyPath, cert_reqs=ssl.CERT_REQUIRED,\n tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None) # pass parameters\n\n# connect to aws server\n mqttc.connect(awshost, awsport, keepalive=60)\n mqttc.loop_start()\nmqttc = paho.Client() # mqttc object\n\n# assign on_connect func\nmqttc.on_connect = on_connect\n\n# assign on_message func\nmqttc.on_message = on_message\n\n# mqttc.on_log = on_log\n\n#### Change following parameters ####\nawshost = \"a1gdantso7vrsi-ats.iot.us-east-2.amazonaws.com\" # Endpoint aws:iot:us-east-2:153855845931:thing/Group7Rpi\nawsport = 8883 # Port no.\nclientId = \"Group7RPiClient-1\" # Thing_Name\nthingName = \"Group7RPiClient\" # Thing_Name\n\ncaPath = \"Credentials/AmazonRootCA3.pem\" # Root_CA_Certificate_Name\ncertPath = \"Credentials/33c6aeda67-certificate.pem.crt\" # .cert.pem\nkeyPath = \"Credentials/33c6aeda67-private.pem.key\" # .private.key\n\ni = 0\nconnect()\nwhile i<=20:\n sleep(5)\n json_data = dict()\n data = dict()\n temp, hum = get_random_value()\n data[\"temperature\"] = temp\n data[\"humidity\"] = hum\n json_data[clientId] = data\n payload_json = json.dumps(json_data)\n # topic: temperature # Publishing Temperature values\n mqttc.publish(\"Group7Rpi\", payload_json, qos=1)\n print(json_data)\n i+=1\n","repo_name":"SailyNatekar/Rpi","sub_path":"Assignment3_IOE/Publish_thing.py","file_name":"Publish_thing.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6521113632","text":"from random import randint\nfrom tkinter import Canvas\n\nfrom Util.tkUtilities import get_widget_dimensions\nfrom ui.login_menu.LoginPage import LoginPage\nfrom util.Utilities import chunks\nfrom util.constants import WATER_SIZE, WATER_COLOR, ANIMATION_DELAY, WATER_RADIUS, APP_TITLE, APP_FONT, \\\n MAX_WATER_BOUNCE, MIN_WATER_BOUNCE, DROPLET_GROUPS\n\n\nclass Droplet:\n def __init__(self, master, x, y):\n self.master = master\n self._original_y = y\n self._x, self._y = x, y\n self._droplet = self._draw()\n 
self._water_below = self._draw_below()\n\n def _draw(self):\n return self.master.create_oval(self._x - WATER_SIZE, self._y - WATER_SIZE,\n self._x + WATER_SIZE, self._y + WATER_SIZE,\n fill=WATER_COLOR, width=0)\n\n def _draw_below(self):\n return self.master.create_rectangle(self._x - WATER_SIZE, self._y,\n self._x + WATER_SIZE, self._y + WATER_SIZE,\n fill=WATER_COLOR, width=0)\n\n def move(self, offset):\n self._y += offset\n self.master.coords(self._droplet, self._x - WATER_SIZE, self._y - WATER_SIZE,\n self._x + WATER_SIZE, self._y + WATER_SIZE)\n self.master.coords(self._water_below, self._x - WATER_SIZE,\n self._y, self._x + WATER_SIZE, self._original_y)\n\n def position(self):\n return self._y\n\n\nclass SplashScreen(Canvas):\n def __init__(self, master):\n super().__init__(master, bg=master['bg'], highlightthickness=0)\n self._width, self._height = 1, 1\n self._points = []\n self._point_chunks = []\n self._divisor = 60\n self._font_sizes = self._set_font_sizes()\n\n def grid(self, **kwargs):\n super().grid(**kwargs)\n self._width, self._height = get_widget_dimensions(self)\n self.create_rectangle(0, self._height, self._width, self._height,\n fill=WATER_COLOR, width=0, tags='water')\n self.create_text(self._width / 2, self._height / 2, text='Loading',\n tags='text', fill='white', font=(APP_FONT, 40, 'bold'))\n # self._animate()\n self._finish_animation()\n\n def _animate(self):\n self._points, self._point_chunks = self._create_points()\n i = 0\n while self._get_water_height() <= self._height:\n self._animate_chunks(i)\n self._animate_text(i)\n self.update()\n i += 1\n self.after(ANIMATION_DELAY)\n\n def _set_font_sizes(self):\n sizes = {}\n half = self._divisor // 2\n for i in range(half + 1):\n sizes[i] = 40 + i\n for i in range(half + 1):\n sizes[half + i] = sizes[half + i - 1] - 1\n return sizes\n\n def _create_points(self):\n points = []\n max_points = 2 * int(self._width / WATER_SIZE)\n for i in range(max_points):\n points.append(Droplet(self, i * WATER_RADIUS, self._height))\n return points, chunks(points, min(max_points, DROPLET_GROUPS))\n\n def _animate_text(self, index):\n self.itemconfig('text', font=(APP_FONT,\n self._font_sizes[index % self._divisor],\n 'bold'))\n self.tag_raise('text')\n\n def _animate_chunks(self, index):\n up_1, up_2 = -MAX_WATER_BOUNCE, - MIN_WATER_BOUNCE\n down_1, down_2 = MIN_WATER_BOUNCE, MAX_WATER_BOUNCE\n for chunk_index, chunk in enumerate(self._point_chunks):\n lower, upper = self._get_bounds(chunk_index, index, up_1,\n up_2, down_1, down_2)\n increment = randint(lower, upper) - randint(MIN_WATER_BOUNCE, MAX_WATER_BOUNCE)\n [droplet.move(randint(increment - 1, increment + 1)) for droplet in chunk]\n\n def _get_bounds(self, chunk_index, index, up_1, up_2, down_1, down_2):\n if chunk_index % 2 == 0:\n if index % 2 == 0:\n return up_1, up_2\n else:\n return down_1, down_2\n else:\n if index % 2 == 0:\n return down_1, down_2\n else:\n return up_1, up_2\n\n def _get_water_height(self):\n return self._height - max([droplet.position() for droplet in self._points])\n\n def _finish_animation(self):\n self.itemconfig('text', text='Welcome to ' + APP_TITLE, font=(APP_FONT, 30, 'bold'))\n self.update()\n self.after(500)\n LoginPage(self.master).grid(row=0, column=0, sticky='nesw')\n self.destroy()\n","repo_name":"Ernxst/Flat-UI-Concept","sub_path":"src/ui/SplashScreen.py","file_name":"SplashScreen.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
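The splash screen above groups its droplets with a `chunks` helper imported from `util.Utilities`, whose implementation is not included in the record. A minimal sketch of one plausible implementation under that assumption — splitting a list into `n_groups` nearly equal contiguous groups, matching how `chunks(points, min(max_points, DROPLET_GROUPS))` is called:

```python
def chunks(items, n_groups):
    """Split items into n_groups contiguous slices of nearly equal length."""
    if n_groups <= 0:
        raise ValueError("n_groups must be positive")
    size, remainder = divmod(len(items), n_groups)
    groups, start = [], 0
    for i in range(n_groups):
        # The first `remainder` groups absorb one extra item each.
        end = start + size + (1 if i < remainder else 0)
        groups.append(items[start:end])
        start = end
    return groups

print(chunks(list(range(10)), 3))  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```

Each animation tick then nudges whole groups of droplets together, which is why the water motion reads as waves rather than independent jitter.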
+{"seq_id":"22141123270","text":"# Faça um programa que lê as duas notas parciais obtidas por um aluno numa disciplina ao longo de um semestre,\n# e calcule a sua média. A atribuição de conceitos obedece à tabela abaixo:\n\n\nnota1 = input(\"Digite a primeira nota: \").replace(',', '.')\nnota2 = input(\"Digite a segunda nota: \").replace(',', '.')\nnota1 = float(nota1)\nnota2 = float(nota2)\n\nmedia = (nota1 + nota2) / 2\n\nif media <= 4:\n print(f'Sua nota media foi {media} seu conceito {\"E\"}')\nelif 4 < media <= 6:\n print(f'Sua nota media foi {media} seu conceito {\"D\"}')\nelif 6 < media <= 7.5:\n print(f'Sua nota media foi {media} seu conceito {\"C\"}')\nelif 7.5 < media <= 9:\n print(f'Sua nota media foi {media} seu conceito {\"B\"}')\nelif 9 < media <= 10:\n print(f'Sua nota media foi {media} seu conceito {\"A\"}')\nelse:\n print(\"Sua media esta invalida\")\n","repo_name":"TassioSales/Python_Brasil_exercicios","sub_path":"2 - EstruturaDeDecisao/exercicio_14.py","file_name":"exercicio_14.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"27175767530","text":"from rest_framework import serializers\nfrom .models import (\n DriverCondition,\n DriverRequirement,\n Company\n)\nclass CompanySearializer(serializers.ModelSerializer):\n class Meta:\n model = Company\n fields = (\n 'id',\n 'rental_conditions',\n )\n\nclass DriverConditionSearializer(serializers.ModelSerializer):\n class Meta:\n model = DriverCondition\n fields = (\n 'id',\n 'description',\n 'deposit_fee',\n 'kilometer_limit',\n 'valet_fee',\n )\n\nclass DriverRequirementSearializer(serializers.ModelSerializer):\n class Meta:\n model = DriverRequirement\n fields = (\n 'id',\n 'description',\n 'min_driver_age',\n 'min_years_of_license',\n 'min_young_driver_age',\n 'min_years_of_youth_drivers_license',\n )\n\nclass CompanyDetailSearializer(serializers.ModelSerializer):\n driver_requirements = DriverRequirementSearializer()\n driver_conditions = DriverConditionSearializer()\n class Meta:\n model = Company\n fields = (\n 'id',\n 'name',\n 'rental_conditions',\n 'address',\n 'driver_conditions',\n 'driver_requirements', \n )","repo_name":"zhunus1/turla_backend","sub_path":"companies/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73198830985","text":"\r\n\"\"\"Program with gamma function\"\"\"\r\nimport unittest\r\nimport math\r\n\r\nPOS = \"Positive gamma\"\r\nNEG = \"Negative gamma\"\r\nINV = \"Not valid input\"\r\n\r\ndef results(x):\r\n \"\"\"Gamma from numbers\"\"\"\r\n print(\"Input: \",x)\r\n result = \"\"\r\n if (type(x) == int or type(x) == float):\r\n num = math.gamma(float(x))\r\n if num> 0:\r\n result = POS\r\n else:\r\n result = NEG\r\n else:\r\n result = INV\r\n print(result,\"\\n\")\r\n return result\r\n\r\n\r\nclass TestDevisionResults(unittest.TestCase):\r\n \"\"\"input testing\"\"\"\r\n def test_positive_gamma(self):\r\n \"\"\"for positive\"\"\"\r\n self.assertEqual(results(8), POS)\r\n\r\n def test_invalid(self):\r\n \"\"\"for invalid input\"\"\"\r\n self.assertEqual(results(\"d\"), INV)\r\n\r\n def test_negative_gamma(self):\r\n \"\"\"for negative\"\"\"\r\n self.assertEqual(results(-0.1), NEG)\r\n\r\n\r\nif __name__ == '__main__':\r\n 
unittest.main()\r\n","repo_name":"Kotliarenko02/Software_Metrics_Exam","sub_path":"SW_Exam.py","file_name":"SW_Exam.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17613312490","text":"import numpy as np\nimport tensorflow as tf\n\n\ndef estimate_sigma(arr, depth):\n k = np.ones((3, 3, depth, 1), dtype=np.float32) / 2\n k[0, 0, :, :] = 0.\n k[2, 0, :, :] = 0.\n k[0, 2, :, :] = 0.\n k[2, 2, :, :] = 0.\n k[1, 1, :, :] = 0.\n\n k = tf.constant(k)\n arr = tf.to_float(arr)\n\n out = tf.nn.depthwise_conv2d(arr, k, [1, 1, 1, 1], 'SAME')\n\n out += tf.transpose(tf.nn.depthwise_conv2d(tf.transpose(arr,\n perm=[2, 0, 1, 3]),\n k, [1, 1, 1, 1], 'SAME'),\n perm=[1, 2, 0, 3])\n\n out += tf.transpose(tf.nn.depthwise_conv2d(tf.transpose(arr,\n perm=[1, 2, 0, 3]),\n k, [1, 1, 1, 1], 'SAME'),\n perm=[2, 0, 1, 3])\n\n out /= 6.\n\n f = tf.constant(np.sqrt(6./7.).astype(np.float32))\n out = f * (arr - out)\n out = tf.sqrt(tf.reduce_mean(tf.square(out), [0, 1, 2]))\n\n return out\n\n\ndef nlmeans(arr, sigmas, sb, depth, p=1, b=5):\n\n sigma = tf.ones_like(arr) * sigmas\n\n arr = tf.pad(arr, [[b, b], [b, b], [b, b], [0, 0]], \"REFLECT\")\n\n # arr = tf.to_float(arr)\n sumw = tf.zeros(sb)\n new_values = tf.zeros(sb)\n patch_vol_size = (2*p+1)**3\n\n add_filter2d = tf.ones([2*p+1, 2*p+1, depth, 1])\n add_filter1d = tf.ones([2*p+1, 1, depth, 1])\n\n center_block = arr[b - p: b - p + sb[0] + 2 * p,\n b - p: b - p + sb[1] + 2 * p,\n b - p: b - p + sb[2] + 2 * p, :]\n\n sigma_c = tf.nn.depthwise_conv2d(tf.pad(sigma, [[b, b], [b, b], [b, b], [0, 0]], \"REFLECT\"),\n add_filter2d, [1, 1, 1, 1], 'SAME')\n sigma_c = tf.nn.depthwise_conv2d(tf.transpose(sigma_c, perm=[2, 0, 1, 3]),\n add_filter1d, [1, 1, 1, 1], 'SAME')\n sigma_c = tf.transpose(sigma_c, perm=[1, 2, 0, 3])\n denom = tf.sqrt(2.) 
* tf.square(sigma_c) / patch_vol_size\n\n for m in range(p, 2 * b + 1 - p):\n for n in range(p, 2 * b + 1 - p):\n for o in range(p, 2 * b + 1 - p):\n\n this_block = arr[m - p: m - p + sb[0] + 2 * p,\n n - p: n - p + sb[1] + 2 * p,\n o - p: o - p + sb[2] + 2 * p, :]\n\n d = tf.square(center_block - this_block)\n\n summs = tf.nn.depthwise_conv2d(d, add_filter2d, [1, 1, 1, 1],\n 'SAME')\n summs = tf.nn.depthwise_conv2d(\n tf.transpose(summs, perm=[2, 0, 1, 3]), add_filter1d,\n [1, 1, 1, 1], 'SAME')\n\n summs = tf.transpose(summs, perm=[1, 2, 0, 3])[p: p+sb[0],\n p: p+sb[1],\n p: p+sb[2],\n :]\n\n ws = tf.exp(-summs / denom[m: m + sb[0],\n n: n + sb[1],\n o: o + sb[2],\n :])\n\n sumw += ws\n new_values += ws * tf.square(arr[m: m + sb[0],\n n: n + sb[1],\n o: o + sb[2],\n :])\n\n new_values *= tf.to_float(tf.greater(sumw, 0))\n new_values /= (sumw + tf.to_float(tf.equal(sumw, 0)))\n new_values -= 2 * sigma\n new_values *= tf.to_float(tf.greater(new_values, 0))\n return tf.sqrt(new_values)\n\n\n","repo_name":"uwdb/image_analytics","sub_path":"neuroscience/tensorflow/denoise.py","file_name":"denoise.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"37620779375","text":"#Faça um Programa que peça um valor e mostre na tela se o valor é positivo ou negativo.\n\nnumero = input(\"digite um valor\")\nif \".\" in numero:\n numero = float(numero)\nelse:\n numero = int(numero)\n\nif numero > 0:\n print(\"o numero é positivo\")\nelif numero<0:\n print(\"o numero é negativo\")\nelse:\n print (\"zero não é positivo nem negativo\")\n","repo_name":"samuelcm/estrutura_decisao","sub_path":"positivo_negativo.py","file_name":"positivo_negativo.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14640079701","text":"def dust(number):\n if number >= 151:\n return \"REALLY_BAD\"\n elif 150 >= number >= 81:\n return \"BAD\"\n elif 80 >= number >= 31:\n return \"NORMAL\"\n else:\n return \"GOOD\"\n\n\ndef micro(number):\n if number >= 76:\n return \"REALLY_BAD\"\n elif 75 >= number >= 36:\n return \"BAD\"\n elif 35 >= number >= 16:\n return \"NORMAL\"\n else:\n return \"GOOD\"\n\n\ndef new_mask():\n return [True, 0]\n\n\ndef solution(atmos):\n mask_on = [\"REALLY_BAD\", \"BAD\"]\n mask_count = 0\n mask_day = 0 # 사용일부터 1\n for d, m, in atmos:\n dust_result = dust(d)\n micro_result = micro(m)\n if dust_result == \"REALLY_BAD\" and micro_result == \"REALLY_BAD\":\n if mask_day == 0:\n mask_count += 1\n mask_day = 0\n # 이미 마스크를 끼고 있는데, 이틀 전이라면 그대로 쓰면 된다. 
-> 그리고 버리면 된다.\n # 근데 마스크가 없다면 무조건 새로 끼고 버려야한다.\n # 공통된 로직은 마스크를 끼고, 버린다.\n # 차이점은 마스크가 없다면, 새로 마스크를 생성하고, 이미 마스크가 있다면 새로 생성할 필요가 없다는 점\n continue\n if dust_result in mask_on or micro_result in mask_on:\n if mask_day == 0:\n mask_count += 1\n mask_day += 1\n continue\n else:\n if mask_day >= 3:\n mask_day = 0\n continue\n else:\n mask_day += 1\n if mask_day == 3:\n mask_day = 0\n else:\n if mask_day > 0:\n mask_day += 1\n if mask_day == 3:\n mask_day = 0\n\n # if mask_day > 0:\n # mask_day += 1\n # if mask_day > 3:\n # mask_day = 0\n\n\n return mask_count\n\n\n\"\"\"\n마스크 재사용 가능\n이틀 후 까지만 재사용 가능\n-> 둘다 매우 나쁨이면 사용 X\n\n필요한 마스크 갯수\n\"\"\"\n\nprint(solution([[80, 35], [70, 38], [100, 41], [75, 30], [160, 80], [77, 29], [181, 68], [151, 76]]))\nprint(solution([[140, 90], [177, 75], [95, 45], [71, 31], [150, 30], [80, 35], [72, 33], [166, 81], [151, 75]]))\nprint(solution([[30, 15], [80, 35]]))\n","repo_name":"hugehoo/problem-solving","sub_path":"2022/2022-05/devmatch/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15456112715","text":"import sys\n\n# ignore global naming issues.\n# pylint: disable=C0103\n\nNetworkManagerState = None\nONLINE = None\nOFFLINE = None\nUNKNOWN = None\n\nif sys.platform == 'win32':\n from ubuntu_sso.networkstate import windows\n NetworkManagerState = windows.NetworkManagerState\n ONLINE = windows.ONLINE\n OFFLINE = windows.OFFLINE\n UNKNOWN = windows.UNKNOWN\nelse:\n from ubuntu_sso.networkstate import linux\n NetworkManagerState = linux.NetworkManagerState\n ONLINE = linux.ONLINE\n OFFLINE = linux.OFFLINE\n UNKNOWN = linux.UNKNOWN\n","repo_name":"Alberto-Beralix/Beralix","sub_path":"i386-squashfs-root/usr/share/pyshared/ubuntu-sso-client/ubuntu_sso/networkstate/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"28036274408","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport json\nimport csv\nimport math, random, sys\nimport numpy as np\nimport argparse\nimport os\n\nfrom structgen.protein_features import ProteinFeatures\nfrom structgen.utils import compute_rmsd, self_square_dist, gather_nodes, kabsch\nfrom collections import namedtuple\nfrom tqdm import tqdm\n\ntorch.set_num_threads(8)\n\nALPHABET = ['#', 'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']\nReturnType = namedtuple('ReturnType',('loss','bind_X'), defaults=(None, None))\n\n\nclass AntibodyComplexDataset():\n\n def __init__(self, jsonl_file, cdr_type, L_binder, L_target, language_model=True):\n self.data = []\n with open(jsonl_file) as f:\n all_lines = f.readlines()\n for line in tqdm(all_lines):\n entry = json.loads(line)\n assert len(entry['antibody_coords']) == len(entry['antibody_seq'])\n assert len(entry['antigen_coords']) == len(entry['antigen_seq'])\n\n # Create scaffold\n if language_model:\n entry['scaffold_seq'] = ''.join([\n ('#' if y in cdr_type else x) for x,y in zip(entry['antibody_seq'], entry['antibody_cdr'])\n ])[:L_binder]\n else:\n entry['scaffold_seq'] = entry['antibody_seq'][:L_binder]\n\n entry['scaffold_coords'] = torch.tensor(entry['antibody_coords'])[:L_binder]\n entry['scaffold_atypes'] = torch.tensor(entry['antibody_atypes'])[:L_binder]\n\n # Binding region\n entry['antibody_cdr'] = entry['antibody_cdr'][:L_binder]\n surface = 
torch.tensor(\n [i for i,v in enumerate(entry['antibody_cdr']) if v in cdr_type]\n )\n entry['binder_surface'] = surface\n entry['binder_seq'] = ''.join([entry['antibody_seq'][i] for i in surface.tolist()])\n entry['binder_coords'] = entry['scaffold_coords'][surface]\n entry['binder_atypes'] = entry['scaffold_atypes'][surface]\n\n # Create target\n entry['target_seq'] = entry['antigen_seq']\n entry['target_coords'] = torch.tensor(entry['antigen_coords'])\n entry['target_atypes'] = torch.tensor(entry['antigen_atypes'])\n\n # Find target surface\n bind_X = entry['binder_coords'][:, 1]\n tgt_X = entry['target_coords'][:, 1]\n dist = bind_X[None,:,:] - tgt_X[:,None,:] # [1, N, 3] - [M, 1, 3]\n dist = dist.norm(dim=-1, p=2).amin(dim=-1) # [M, N] -> [M]\n _, target = dist.topk(k=min(len(dist),L_target), largest=False)\n entry['target_surface'] = target\n\n if len(entry['binder_coords']) > 4 and len(entry['target_coords']) > 4:\n self.data.append(entry)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n return self.data[idx]\n\n\nclass ComplexLoader():\n\n def __init__(self, dataset, batch_tokens):\n self.dataset = dataset\n self.size = len(dataset)\n self.lengths = [len(dataset[i]['binder_seq']) for i in range(self.size)]\n self.batch_tokens = batch_tokens\n sorted_ix = np.argsort(self.lengths)\n\n # Cluster into batches of similar sizes\n clusters, batch = [], []\n for ix in sorted_ix:\n size = self.lengths[ix]\n batch.append(ix)\n if size * (len(batch) + 1) > self.batch_tokens:\n clusters.append(batch)\n batch = []\n\n self.clusters = clusters\n if len(batch) > 0:\n clusters.append(batch)\n\n def __len__(self):\n return len(self.clusters)\n\n def __iter__(self):\n np.random.shuffle(self.clusters)\n for b_idx in self.clusters:\n batch = [self.dataset[i] for i in b_idx]\n yield batch\n\n\ndef featurize(batch, name):\n B = len(batch)\n L_max = max([len(b[name + \"_seq\"]) for b in batch])\n X = torch.zeros([B, L_max, 14, 3])\n S = torch.zeros([B, L_max]).long()\n A = torch.zeros([B, L_max, 14]).long()\n\n # Build the batch\n for i, b in enumerate(batch):\n l = len(b[name + '_seq'])\n X[i,:l] = b[name + '_coords']\n A[i,:l] = b[name + '_atypes']\n indices = torch.tensor([ALPHABET.index(a) for a in b[name + '_seq']])\n S[i,:l] = indices\n\n return X.cuda(), S.cuda(), A.cuda()\n\n\ndef make_batch(batch):\n target = featurize(batch, 'target')\n scaffold = featurize(batch, 'scaffold')\n binder = featurize(batch, 'binder')\n surface = ([b['binder_surface'] for b in batch], [b['target_surface'] for b in batch])\n return binder, scaffold, target, surface\n\n\nclass MPNNLayer(nn.Module):\n\n def __init__(self, num_hidden, num_in, dropout):\n super(MPNNLayer, self).__init__()\n self.num_hidden = num_hidden\n self.num_in = num_in\n self.dropout = nn.Dropout(dropout)\n self.W = nn.Sequential(\n nn.Linear(num_hidden + num_in, num_hidden),\n nn.ReLU(),\n nn.Linear(num_hidden, num_hidden),\n nn.ReLU(),\n nn.Linear(num_hidden, num_hidden),\n )\n\n def forward(self, h_V, h_E, mask_attend):\n h_V_expand = h_V.unsqueeze(-2).expand(-1, -1, h_E.size(-2), -1)\n h_EV = torch.cat([h_V_expand, h_E], dim=-1) # [B, N, K, H]\n h_message = self.W(h_EV) * mask_attend.unsqueeze(-1)\n dh = torch.mean(h_message, dim=-2)\n h_V = h_V + self.dropout(dh)\n return h_V\n\n\nclass MPNEncoder(nn.Module):\n \n def __init__(self, args):\n super(MPNEncoder, self).__init__()\n self.features = ProteinFeatures(\n top_k=args.k_neighbors, num_rbf=args.num_rbf,\n features_type='full',\n direction='bidirectional'\n 
)\n self.node_in, self.edge_in = self.features.feature_dimensions['full']\n \n self.W_v = nn.Linear(self.node_in, args.hidden_size)\n self.W_e = nn.Linear(self.edge_in, args.hidden_size)\n self.layers = nn.ModuleList([\n MPNNLayer(args.hidden_size, args.hidden_size * 3, dropout=args.dropout)\n for _ in range(args.depth)\n ])\n for param in self.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param)\n\n def forward(self, X, V, S, A):\n mask = A.clamp(max=1).float()\n vmask = mask[:,:,1]\n _, E, E_idx = self.features(X, vmask)\n\n h = self.W_v(V) # [B, N, H] \n h_e = self.W_e(E) # [B, N, K, H] \n nei_s = gather_nodes(S, E_idx) # [B, N, K, H]\n emask = gather_nodes(vmask[...,None], E_idx).squeeze(-1)\n\n # message passing\n for layer in self.layers:\n nei_v = gather_nodes(h, E_idx) # [B, N, K, H]\n nei_h = torch.cat([nei_v, nei_s, h_e], dim=-1)\n h = layer(h, nei_h, mask_attend=emask) # [B, N, H]\n h = h * vmask.unsqueeze(-1) # [B, N, H]\n return h\n\n\nclass RefineFolder(nn.Module):\n\n def __init__(self, args):\n super(RefineFolder, self).__init__()\n self.rstep = args.rstep\n self.k_neighbors = args.k_neighbors\n self.hidden_size = args.hidden_size\n self.embedding = nn.Embedding(len(ALPHABET), args.hidden_size)\n self.rnn = nn.GRU(\n args.hidden_size,\n args.hidden_size,\n num_layers=1,\n batch_first=True,\n dropout=args.dropout,\n )\n self.features = ProteinFeatures(\n top_k=args.k_neighbors, num_rbf=args.num_rbf,\n features_type='full',\n direction='bidirectional'\n )\n self.W_x0 = nn.Linear(args.hidden_size, 42)\n self.W_x = nn.Linear(args.hidden_size, 42)\n self.struct_mpn = MPNEncoder(args)\n for param in self.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param)\n\n self.bce_loss = nn.BCEWithLogitsLoss(reduction='none')\n self.ce_loss = nn.CrossEntropyLoss(reduction='none')\n self.mse_loss = nn.MSELoss(reduction='none')\n self.huber_loss = nn.SmoothL1Loss(reduction='none')\n\n def encode_scaffold(self, h_S, mask, bind_pos):\n scaf_h, _ = self.rnn(h_S)\n max_len = max([len(pos) for pos in bind_pos])\n bind_h = [scaf_h[i, pos] for i,pos in enumerate(bind_pos)]\n bind_h = [F.pad(h, (0,0,0,max_len-len(h))) for h in bind_h]\n return torch.stack(bind_h, dim=0), scaf_h\n\n def struct_loss(self, X, mask, true_D, true_V, true_AD):\n D, _ = self_square_dist(X, mask[:,:,1])\n V = self.features._dihedrals(X)\n AD = self.features._AD_features(X[:,:,1,:])\n dloss = self.huber_loss(D, true_D) + 20 * F.relu(14.4 - D)\n vloss = self.mse_loss(V, true_V).sum(dim=-1)\n aloss = self.mse_loss(AD, true_AD).sum(dim=-1)\n return dloss, vloss + aloss\n\n def forward(self, binder, scaffold, surface):\n true_X, true_S, true_A = binder\n _, scaf_S, scaf_A = scaffold\n surface, _ = surface\n true_mask = true_A.clamp(max=1).float()\n\n # Ground truth \n B, N, L = true_X.size(0), true_X.size(1), true_X.size(2)\n true_V = self.features._dihedrals(true_X)\n true_D, mask_2D = self_square_dist(true_X, true_mask[:,:,1])\n true_AD = self.features._AD_features(true_X[:,:,1,:])\n\n # Initial coords\n scaf_S = self.embedding(scaf_S)\n scaf_mask = scaf_A[:,:,1].clamp(max=1).float()\n scaf_h, _ = self.encode_scaffold(scaf_S, scaf_mask, surface)\n\n X = self.W_x0(scaf_h).view(B, N, L, 3)\n dloss, vloss = self.struct_loss(X, true_mask, true_D, true_V, true_AD)\n\n for t in range(self.rstep):\n X = X.detach().clone()\n V = self.features._dihedrals(X)\n h = self.struct_mpn(X, V, scaf_h, true_A)\n X = self.W_x(h).view(B, N, L, 3)\n X = X * true_mask[...,None]\n dloss_t, vloss_t = self.struct_loss(X, 
true_mask, true_D, true_V, true_AD)\n dloss += dloss_t\n vloss += vloss_t\n\n dloss = torch.sum(dloss * mask_2D) / mask_2D.sum()\n vloss = torch.sum(vloss * true_mask[:,:,1]) / true_mask[:,:,1].sum()\n loss = dloss + vloss\n return ReturnType(loss=loss, bind_X=X.detach())\n\n\ndef evaluate(model, loader, args):\n model.eval()\n bb_rmsd = []\n with torch.no_grad():\n for batch in tqdm(loader):\n binder, scaffold, target, surface = make_batch(batch)[:4]\n true_X, _, true_A = binder\n true_mask = true_A.clamp(max=1).float()\n out = model(binder, scaffold, surface)\n rmsd = compute_rmsd(\n out.bind_X[:, :, 1], true_X[:, :, 1], true_mask[:, :, 1]\n )\n bb_rmsd.extend(rmsd.tolist())\n\n return sum(bb_rmsd) / len(bb_rmsd)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_path', default='data/sabdab_2022_01/train_data.jsonl')\n parser.add_argument('--val_path', default='data/sabdab_2022_01/val_data.jsonl')\n parser.add_argument('--test_path', default='data/sabdab_2022_01/test_data.jsonl')\n parser.add_argument('--save_dir', default='ckpts/tmp')\n parser.add_argument('--load_model', default=None)\n\n parser.add_argument('--cdr', default='123')\n\n parser.add_argument('--hidden_size', type=int, default=256)\n parser.add_argument('--batch_tokens', type=int, default=200)\n parser.add_argument('--k_neighbors', type=int, default=9)\n parser.add_argument('--L_binder', type=int, default=150)\n parser.add_argument('--L_target', type=int, default=200)\n parser.add_argument('--depth', type=int, default=4)\n parser.add_argument('--rstep', type=int, default=4)\n parser.add_argument('--vocab_size', type=int, default=21)\n parser.add_argument('--num_rbf', type=int, default=16)\n parser.add_argument('--dropout', type=float, default=0.1)\n\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--epochs', type=int, default=10)\n parser.add_argument('--seed', type=int, default=7)\n parser.add_argument('--print_iter', type=int, default=50)\n parser.add_argument('--anneal_rate', type=float, default=0.9)\n parser.add_argument('--clip_norm', type=float, default=1.0)\n\n args = parser.parse_args()\n print(args)\n\n os.makedirs(args.save_dir, exist_ok=True)\n\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n all_data = []\n for path in [args.train_path, args.val_path, args.test_path]:\n data = AntibodyComplexDataset(\n path,\n cdr_type=args.cdr,\n L_binder=args.L_binder,\n L_target=args.L_target,\n language_model=False\n )\n all_data.append(data)\n\n loader_train = ComplexLoader(all_data[0], batch_tokens=args.batch_tokens)\n loader_val = ComplexLoader(all_data[1], batch_tokens=0)\n loader_test = ComplexLoader(all_data[2], batch_tokens=0)\n\n model = RefineFolder(args).cuda()\n optimizer = torch.optim.Adam(model.parameters())\n\n if args.load_model:\n model_ckpt, opt_ckpt, model_args = torch.load(args.load_model)\n model = RefineFolder(model_args).cuda() # new argument\n optimizer = torch.optim.Adam(model.parameters())\n model.load_state_dict(model_ckpt)\n optimizer.load_state_dict(opt_ckpt)\n\n print('Training:{}, Validation:{}, Test:{}'.format(\n len(loader_train.dataset), len(loader_val.dataset), len(loader_test.dataset))\n )\n\n best_rmsd, best_epoch = 100, -1\n for e in range(args.epochs):\n model.train()\n meter = 0\n\n for i,batch in enumerate(tqdm(loader_train)):\n optimizer.zero_grad()\n binder, scaffold, target, surface = make_batch(batch)[:4]\n out = model(binder, scaffold, surface)\n 
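            # out.loss is the sum of the pairwise-distance term (dloss) and the
            # dihedral/orientation term (vloss) computed in RefineFolder.forward;
            # backward() fills the parameter gradients, and clip_grad_norm_ on the
            # next line caps their global L2 norm at args.clip_norm before
            # optimizer.step(), a standard guard against exploding gradients when
            # coordinates are refined over several unrolled steps.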
out.loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.clip_norm)\n optimizer.step()\n\n meter += out.loss.item()\n if (i + 1) % args.print_iter == 0:\n meter /= args.print_iter\n print(f'[{i + 1}] Train Loss = {meter:.3f}')\n meter = 0\n\n val_rmsd = evaluate(model, loader_val, args)\n ckpt = (model.state_dict(), optimizer.state_dict(), args)\n torch.save(ckpt, os.path.join(args.save_dir, f\"model.ckpt.{e}\"))\n print(f'Epoch {e}, Backbone RMSD = {val_rmsd:.3f}')\n\n if val_rmsd < best_rmsd:\n best_rmsd = val_rmsd\n best_epoch = e\n\n if best_epoch >= 0:\n best_ckpt = os.path.join(args.save_dir, f\"model.ckpt.{best_epoch}\")\n model.load_state_dict(torch.load(best_ckpt)[0])\n\n test_rmsd = evaluate(model, loader_test, args)\n print(f'Test Backbone RMSD = {test_rmsd:.3f}')\n","repo_name":"wengong-jin/RefineGNN","sub_path":"fold_train.py","file_name":"fold_train.py","file_ext":"py","file_size_in_byte":15076,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"81"} +{"seq_id":"22244456834","text":"from data_stark import*\n\ndef listado_heroes ()->str:\n \"\"\"Recorre una lista con nombres\n\n Returns:\n str: devuelve un str\n \"\"\"\n for heroe in lista_heroes:\n print(\"Nombre del Heroe: \")\n print(\"\")\n print(heroe[\"nombre\"])\n print(\"\")\n return heroe\n\ndef listado_nombre_y_altura ():\n for heroe in lista_heroes:\n print(\"Nombre y Altura del Heroe: \")\n print(\"\")\n print(f\"{heroe['nombre']} - Altura: {heroe['altura']} cm\")\n print(\"\")\n return heroe\n\ndef heroe_mas_alto ()->float:\n max_altura = float(lista_heroes[0][\"altura\"])\n superheroe_mas_alto = lista_heroes[0][\"nombre\"]\n\n for heroe in lista_heroes:\n altura_actual = float(heroe[\"altura\"])\n if altura_actual > max_altura:\n superheroe_mas_alto = heroe[\"nombre\"]\n \n print(\"El heroe mas alto es:\")\n print(\"\")\n print(f\"**{superheroe_mas_alto}**\")\n print(\"\")\n return heroe\n\ndef heroe_menos_alto ():\n\n min_altura = float(lista_heroes[0][\"altura\"])\n superheroe_menos_alto = lista_heroes[0][\"nombre\"]\n\n for heroe in lista_heroes:\n altura_actual = float(heroe[\"altura\"])\n if altura_actual < min_altura:\n superheroe_menos_alto = heroe[\"nombre\"]\n\n print(\"El heroe menos alto es:\")\n print(\"\")\n print(f\"**{superheroe_menos_alto}**\")\n print(\"\")\n return heroe\n\ndef promedios_altura ():\n\n total_alturas = 0\n cantidad_heroe = 0\n\n for heroe in lista_heroes:\n total_alturas += float(heroe[\"altura\"])\n cantidad_heroe += 1\n\n altura_promedio = total_alturas // cantidad_heroe\n\n print(\"La altura promedio de los superheroes es: \")\n print(\"\")\n print(f\"{altura_promedio:.2f}\")\n print(\"\")\n return heroe\n\ndef comparativa_de_alturas ():\n min_altura = float(lista_heroes[0][\"altura\"])\n superheroe_menos_alto = lista_heroes[0][\"nombre\"]\n\n for heroe in lista_heroes:\n altura_actual = float(heroe[\"altura\"])\n if altura_actual < min_altura:\n superheroe_menos_alto = heroe[\"nombre\"]\n\n max_altura = float(lista_heroes[0][\"altura\"])\n superheroe_mas_alto = lista_heroes[0][\"nombre\"]\n\n for heroe in lista_heroes:\n altura_actual = float(heroe[\"altura\"])\n if altura_actual > max_altura:\n superheroe_mas_alto = heroe[\"nombre\"]\n\n print(\"El heroe mas alto es: \")\n print(\"\")\n print(superheroe_mas_alto[0])\n print(\"\")\n print(\"El heroe mas bajo es: \")\n print(\"\")\n print(superheroe_menos_alto[0])\n print(\"\")\n return heroe\n\ndef comparativa_pesos ():\n\n peso_mas_pesado = 
float(lista_heroes[0][\"peso\"])\n peso_menos_pesado = float(lista_heroes[0][\"peso\"])\n superheroe_mas_pesado = lista_heroes[0][\"nombre\"]\n superheroe_menos_pesado = lista_heroes[0][\"nombre\"]\n\n for heroe in lista_heroes:\n peso_actual = float(heroe[\"peso\"])\n if peso_actual > peso_mas_pesado:\n peso_mas_pesado = peso_actual\n superheroe_mas_pesado = heroe[\"nombre\"]\n else:\n peso_menos_pesado = peso_actual\n superheroe_menos_pesado = heroe[\"nombre\"]\n\n print(\"\")\n print(\"El heroe mas pesado es: \")\n print(\"\")\n print(superheroe_mas_pesado)\n print(\"\")\n print(\"El heroe menos pesado es: \")\n print(\"\")\n print(superheroe_menos_pesado)\n return heroe\n","repo_name":"gonzalo12121212/T.P.Heroes00","sub_path":"T.P_Heroes/Tp_Heroes.py","file_name":"Tp_Heroes.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21001760230","text":"import itertools\nfrom utils import WHITE, BLACK, convert_l_n_to_indexes, convert_indexes_to_l_n, is_within_board\n\n\nclass Piece:\n newid = itertools.count()\n\n def __init__(self, name, sign, color):\n self.id = next(Piece.newid)\n self.name = name\n self.sign = sign\n self.color = color\n\n def __repr__(self):\n return str(self.id) + \" \" + self.sign\n\n def get_color(self):\n return self.color\n\n def get_sign(self):\n return self.sign\n\n @staticmethod\n def calculate_sign(num, color):\n char_num = num + 6 if color == WHITE else num\n return \" \" + chr(char_num)\n\n def prepare_x_y_initial(self, current_field):\n letter = current_field[0]\n number = current_field[1]\n return convert_l_n_to_indexes(letter, number)\n\n def get_possible_moves(self, current_field, board, modifications, limit=None):\n x_initial, y_initial = self.prepare_x_y_initial(current_field)\n possible_moves = []\n for modification in modifications:\n x = x_initial\n y = y_initial\n stop = False\n while not stop:\n x, y = modification(x, y)\n if is_within_board(x, y):\n l, n = convert_indexes_to_l_n(x, y)\n piece = board.get_piece_from_square(l, n)\n # free square without piece\n if not piece:\n possible_moves.append((l, n))\n # enemy piece encountered\n elif piece and piece.get_color() != self.color:\n possible_moves.append((l, n))\n break\n # ally piece encountered\n else:\n break\n # for king only:\n if limit:\n break\n else:\n break\n return possible_moves\n\n def go_left(self, x, y):\n return x - 1, y\n\n def go_right(self, x, y):\n return x + 1, y\n\n def go_down(self, x, y):\n return x, y - 1\n\n def go_up(self, x, y):\n return x, y + 1\n\n def go_left_up(self, x, y):\n return x - 1, y + 1\n\n def go_right_up(self, x, y):\n return x + 1, y + 1\n\n def go_left_down(self, x, y):\n return x - 1, y - 1\n\n def go_right_down(self, x, y):\n return x + 1, y - 1\n\n\nclass Pawn(Piece):\n\n def __init__(self, color):\n name = \"Pawn\"\n sign = self.calculate_sign(9817, color)\n super(Pawn, self).__init__(name, sign, color)\n self.en_passant = False\n\n def set_en_passant_possible(self):\n self.en_passant = True\n\n def get_en_passant_possible(self):\n return self.en_passant\n\n def get_possible_moves(self, current_field, board):\n x_initial, y_initial = super(Pawn, self).prepare_x_y_initial(current_field)\n possible_moves = []\n if self.get_color() == BLACK:\n sign = -1\n else:\n sign = 1\n\n # double step forward when initial position\n if (self.get_color() == WHITE and y_initial == 1) or (self.get_color() == BLACK and y_initial == 6):\n x = x_initial\n y = y_initial + 
2 * sign\n if is_within_board(x, y):\n l, n = convert_indexes_to_l_n(x, y)\n occupied = board.check_if_occupied(l, n)\n if not occupied:\n possible_moves.append((l, n))\n\n # step forward\n x = x_initial\n y = y_initial + 1 * sign\n if is_within_board(x, y):\n l, n = convert_indexes_to_l_n(x, y)\n occupied = board.check_if_occupied(l, n)\n if not occupied:\n possible_moves.append((l, n))\n\n # attack to right\n x = x_initial + 1\n y = y_initial + 1 * sign\n if is_within_board(x, y):\n l, n = convert_indexes_to_l_n(x, y)\n piece = board.get_piece_from_square(l, n)\n if piece and piece.get_color() != self.color:\n possible_moves.append((l, n))\n\n # attack to left\n x = x_initial - 1\n y = y_initial + 1 * sign\n\n if is_within_board(x, y):\n l, n = convert_indexes_to_l_n(x, y)\n piece = board.get_piece_from_square(l, n)\n if piece and piece.get_color() != self.color:\n possible_moves.append((l, n))\n\n # en_passant\n pass\n\n return possible_moves\n\n\nclass Knight(Piece):\n\n def __init__(self, color):\n name = \"Knight\"\n sign = self.calculate_sign(9816, color)\n super(Knight, self).__init__(name, sign, color)\n\n def get_possible_moves(self, current_field, board):\n possible_moves = []\n x_initial, y_initial = super(Knight, self).prepare_x_y_initial(current_field)\n \"\"\"\n # #\n # #\n O\n # #\n # # \n \"\"\"\n x = x_initial\n y = y_initial\n possibilities = ((x - 1, y + 2), (x + 1, y + 2),\n (x - 2, y + 1), (x + 2, y + 1),\n (x - 2, y - 1), (x + 2, y - 1),\n (x - 1, y - 2), (x + 1, y - 2))\n for (x, y) in possibilities:\n if is_within_board(x, y):\n l, n = convert_indexes_to_l_n(x, y)\n piece = board.get_piece_from_square(l, n)\n if (not piece) or (piece and piece.get_color() != self.color):\n possible_moves.append((l, n))\n return possible_moves\n\n\nclass Bishop(Piece):\n\n def __init__(self, color):\n name = \"Bishop\"\n sign = self.calculate_sign(9814, color)\n super(Bishop, self).__init__(name, sign, color)\n\n def get_possible_moves(self, current_field, board):\n \"\"\"\n #\n #\n #\n # # # O # # #\n #\n #\n #\n \"\"\"\n movement = (self.go_left, self.go_right, self.go_down, self.go_up)\n return super(Bishop, self).get_possible_moves(current_field, board, movement)\n\n\nclass Rook(Piece):\n\n def __init__(self, color):\n name = \"Rook\"\n sign = self.calculate_sign(9815, color)\n super(Rook, self).__init__(name, sign, color)\n\n def get_possible_moves(self, current_field, board):\n \"\"\"\n # #\n # #\n O\n # #\n # #\n \"\"\"\n movement = [self.go_left_up, self.go_right_up, self.go_left_down, self.go_right_down]\n return super(Rook, self).get_possible_moves(current_field, board, movement)\n\n\nclass Queen(Piece):\n\n def __init__(self, color):\n name = \"Queen\"\n sign = self.calculate_sign(9813, color)\n super(Queen, self).__init__(name, sign, color)\n\n def get_possible_moves(self, current_field, board):\n \"\"\"\n # # #\n # # #\n # # # O # # #\n # # #\n # # #\n \"\"\"\n movement = (self.go_left_up, self.go_right_up, self.go_left_down, self.go_right_down,\n self.go_left, self.go_right, self.go_down, self.go_up)\n return super(Queen, self).get_possible_moves(current_field, board, movement)\n\n\nclass King(Piece):\n\n def __init__(self, color):\n name = \"King\"\n sign = self.calculate_sign(9812, color)\n super(King, self).__init__(name, sign, color)\n self.en_passant_possible = True\n\n def get_possible_moves(self, current_field, board):\n \"\"\"\n # # #\n # O #\n # # #\n \"\"\"\n movement = (self.go_left_up, self.go_right_up, self.go_left_down, self.go_right_down,\n self.go_left, 
self.go_right, self.go_down, self.go_up)\n limit = 1\n possible_moves = super(King, self).get_possible_moves(current_field, board, movement, limit)\n if self.get_color() == WHITE:\n self.check_castling_possible(board, 'a', 1, WHITE, ['b', 'c', 'd'], possible_moves)\n self.check_castling_possible(board, 'h', 1, WHITE, ['g', 'f'], possible_moves)\n elif self.get_color() == BLACK:\n self.check_castling_possible(board, 'a', 8, BLACK, ['b', 'c'], possible_moves)\n self.check_castling_possible(board, 'h', 8, BLACK, ['g', 'f', 'e'], possible_moves)\n return possible_moves\n\n def check_castling_possible(self, board, l, n, color, to_be_empty, possible_moves):\n fields_empty = True\n for letter in to_be_empty:\n if board.get_piece_from_square(letter, n):\n fields_empty = False\n if self.get_color() == color and \\\n isinstance(board.get_piece_from_square(l, n), Bishop) and \\\n board.get_piece_from_square(l, n).get_color() == color and \\\n fields_empty:\n print(\"Castling possible!\")\n possible_moves.append((l, n))\n return True\n","repo_name":"Michal-lis/python_playground","sub_path":"Chess/Chess/Piece.py","file_name":"Piece.py","file_ext":"py","file_size_in_byte":8979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27106021205","text":"from sangreal_wind.utils.engines import WIND_DB\nfrom sangreal_wind.utils.fund_type import FUND_TYPE\nfrom functools import lru_cache\nfrom collections import Iterable\n\nFUND_TYPE_LEVEL0 = ['股票型基金', '混合型基金', '债券型基金']\n\n\n@lru_cache()\ndef get_fund_list():\n table0 = getattr(WIND_DB, 'ChinaMutualFundSector'.upper())\n table1 = getattr(WIND_DB, 'AShareIndustriesCode'.upper())\n df = WIND_DB.query(table0.F_INFO_WINDCODE, table0.S_INFO_SECTORENTRYDT,\n table0.S_INFO_SECTOREXITDT,\n table1.INDUSTRIESNAME).filter(\n table0.S_INFO_SECTOR == table1.INDUSTRIESCODE,\n table0.S_INFO_SECTORENTRYDT != None,\n table0.S_INFO_SECTOREXITDT == None).to_df()\n df.columns = [c.lower() for c in df.columns]\n df = df[df['INDUSTRIESNAME'.lower()].isin(FUND_TYPE)]\n df.columns = ['sid', 'entry_dt', 'exit_dt', 'fund_type']\n return df\n\n\ndef get_fund_filter(fundtype='all'):\n \"\"\"[选取同一类型下的基金]\n \n Keyword Arguments:\n fundtype {str} -- [基金类型] (default: {'all'})\n \n Raises:\n ValueError -- [description]\n \n Returns:\n [pd.Series] -- [Series of fund]\n \"\"\"\n\n df = get_fund_list()\n if fundtype == 'all':\n return df.sid\n elif fundtype == '股票型':\n return df[df['fund_type'].isin((\n '普通股票型基金',\n '被动指数型基金',\n '增强指数型基金',\n ))].sid\n elif fundtype == '混合型':\n return df[df['fund_type'].isin((\n '偏股混合型基金',\n '平衡混合型基金',\n '偏债混合型基金',\n '灵活配置型基金',\n ))].sid\n elif fundtype == '债券型':\n return df[df['fund_type'].isin((\n '中长期纯债型基金',\n '短期纯债型基金',\n '混合债券型一级基金',\n '混合债券型二级基金',\n '被动指数型债券基金',\n '增强指数型债券基金',\n ))].sid\n elif isinstance(fundtype, str):\n tmp_f = fundtype.rstrip('基金') + '基金'\n return df[df['fund_type'] == tmp_f].sid\n elif isinstance(fundtype, Iterable):\n tmp_fundtype = [f.rstrip('基金') + '基金' for f in fundtype]\n return df[df['fund_type'].isin(tmp_fundtype)].sid\n else:\n raise ValueError(f'请输入正确的基金类型! 
如{FUND_TYPE_LEVEL0 + FUND_TYPE}')\n\n\nif __name__ == '__main__':\n print(get_fund_filter('all').head())\n print(get_fund_filter('债券型').head())\n print(get_fund_filter('中长期纯债型基').head())\n print(get_fund_filter(['中长期纯债型基', '中长期纯债型基']).head())\n","repo_name":"liubola/sangreal-wind","sub_path":"sangreal_wind/api/get_fund_list.py","file_name":"get_fund_list.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2811366912","text":"\ndef is_square(x):\n return int(x ** 0.5) ** 2 == x\n\n\ndef main():\n DIGITS = 20\n\n targets_dict = [{} for _ in range(DIGITS)]\n targets_dict[0] = {i**2: [(i, 0)] for i in range(10)}\n for d in range(1, DIGITS):\n current = targets_dict[d]\n\n for i in range(10):\n for old_target in targets_dict[d-1]:\n new_target = old_target + i**2\n current.setdefault(new_target, []).append((i, old_target))\n\n # for i in range(5):\n # for j in sorted(targets_dict[i]):\n # print(j, targets_dict[i][j], end=' ')\n # print()\n\n times_used_cache = [{} for _ in range(DIGITS)]\n times_used_cache[0] = {i**2: [1] for i in range(10)}\n\n def get_times_used_count(target, digits_left):\n if times_used_cache[digits_left].get(target, []):\n return sum(times_used_cache[digits_left][target])\n\n target_counts = []\n for curr, last_target in targets_dict[digits_left].get(target, []):\n curr_target_count = get_times_used_count(last_target,\n digits_left-1)\n target_counts.append(curr_target_count)\n times_used_cache[digits_left][target] = target_counts\n\n return sum(target_counts)\n\n for t in targets_dict[DIGITS-1]:\n get_times_used_count(t, DIGITS-2)\n\n # for i in range(5):\n # for j in sorted(times_used_cache[i]):\n # print(j, times_used_cache[i][j], end=' ')\n # print()\n\n sum_cache = [{} for _ in range(DIGITS)]\n sum_cache[0] = {i**2: [i] for i in range(10)}\n\n def get_sum(target, digits_left):\n if sum_cache[digits_left].get(target, 0) or digits_left == 0:\n return sum(sum_cache[digits_left][target])\n\n target_sums = []\n for curr, last_target in targets_dict[digits_left].get(target, []):\n last_target_sum = get_sum(last_target, digits_left-1)\n # print(curr, last_target, times_used_cache[digits_left][last_target])\n curr_digit_sum = last_target_sum + \\\n curr * 10**digits_left * \\\n sum(times_used_cache[digits_left-1][last_target])\n target_sums.append(curr_digit_sum)\n sum_cache[digits_left][target] = target_sums\n\n return sum(target_sums)\n\n for t in targets_dict[DIGITS-1]:\n get_sum(t, DIGITS-1)\n\n # for i in range(5):\n # for j in sorted(sum_cache[i]):\n # print(j, sum_cache[i][j], end=' ')\n # print()\n\n total = 0\n for d in range(DIGITS):\n for t in targets_dict[d]:\n if is_square(t):\n for (curr_digit, old_target), target_sum in \\\n zip(targets_dict[d][t], sum_cache[d][t]):\n if curr_digit != 0:\n total += target_sum\n\n print(total)\n print(str(total)[-9:])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"colinxy/ProjectEuler","sub_path":"Python/project_euler171.py","file_name":"project_euler171.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34771636290","text":"\nclass Student:\n def __init__(self, name, surname, age, birthday, city, country, group_number):\n \n self.__name = name\n self.__surname = surname\n self.__age = age\n self.__birthday = birthday\n self.__city = city\n self.__country = country\n self.__group_number = group_number\n \n \n \n 
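    # The double-underscore attributes set above are name-mangled, so outside
    # code goes through the property/setter pairs below. Illustrative use, not
    # from the original file:
    #   s = Student("Jon", "Snow", 33, "23.01.1989", "Lublin", "Poland", 12)
    #   s.age = 150          # outside 0-120: setter prints a warning, keeps 33
    #   s.group_number = 15  # within 1-30: accepted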
@property\n def name(self):\n return self.__name\n \n @name.setter\n def name(self, new_name):\n self.__name = new_name\n \n \n @property\n def surname(self):\n return self.__surname\n\n @surname.setter\n def surname(self, new_surname):\n self.__surname = new_surname\n \n @property\n def age(self):\n return self.__age\n \n @age.setter\n def age(self, new_age):\n if new_age <= 0 or new_age >= 120: \n print (\"Out of range. '0-120'\")\n else:\n self.__age = new_age\n \n @property\n def birthday(self):\n return self.__birthday\n \n \n @property\n def city(self):\n return self.__city\n @city.setter\n def city(self, new_city):\n self.__city = new_city\n \n \n @property\n def country(self):\n return self.__country\n @country.setter\n def country(self, new_country):\n self.__city = new_country\n \n \n @property\n def group_number(self):\n return self.__group_number\n\n @group_number.setter\n def group_number(self, new_group_number):\n if new_group_number <=0 or new_group_number > 30: \n print (\"Out of range. '1-30'\")\n else:\n self.__group_number = new_group_number\n \n def show_student_info(self):\n print(\"{}, {} , {} , {} \\n\".format(self.__name,self.__surname,self.__age, self.__birthday))\n print(\"{}, {}, {} \\n============\\n\".format(self.__city,self.__country,self.__group_number))\n\n#def __init__(self, name, surname, age, birthday, city, country, group_number)\ndaenerys = Student(\"Daenerys\", \"Targaryen\", 27 ,\"27.05.1995\",\"Lviv\",\"Ukraine\",15)\nsamwell = Student(\"Samwell\", \"Tarly\", 34 ,\"24.04.1988\",\"Ternopil\",\"Ukraine\",13)\njon = Student(\"Jon\", \" Snow\", 33 ,\"23.01.1989\",\"Lublin\",\"Poland\",12)\narya = Student(\"Arya\", \"Stark\", 23 ,\"20.01.1998\",\"Rivne\",\"Ukraine\",12)\nsansa = Student(\"Sansa\", \"Stark\", 24 ,\"22.08.1997\",\"Rivne\",\"Ukraine\",12)\nbrandon = Student(\"Brandon\", \"Stark\", 31 ,\"12.08.1990\",\"Rivne\",\"Ukraine\",12)\nned = Student(\"Ned\", \"Stark\", 29 ,\"14.05.1993\",\"Rivne\",\"Ukraine\",12)\nrobert = Student(\"Robert\", \"Baratheon\", 40 ,\"10.04.1982\",\"Berlin\",\"German\",11)\njamie = Student(\"Jamie\", \"Lannister\", 21 ,\"29.03.2001\",\"London\",\"Great Britain\",14)\ncersei = Student(\"Cersei\", \"Lannister\", 22 ,\"14.02.2000\",\"London\",\"Great Britain\",14)\n\nstudent_list =[]\n\nstudent_list.append(daenerys)\nstudent_list.append(samwell)\nstudent_list.append(jon)\nstudent_list.append(arya)\nstudent_list.append(sansa)\nstudent_list.append(brandon)\nstudent_list.append(ned)\nstudent_list.append(robert)\nstudent_list.append(jamie)\nstudent_list.append(cersei)\n\nfor student in student_list:\n student.show_student_info()\n\n","repo_name":"Alkaponees/Python_Hometask","sub_path":"basic lesson/Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74279230346","text":"import numpy as np\nfrom scipy.sparse import vstack\nfrom sklearn.datasets import load_svmlight_file, load_svmlight_files\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\n\ndef load_cadata(normalize=True):\n # load_data\n features, labels = load_svmlight_file('./data/cadata_sample')\n features = np.asarray(features.todense())\n n_data = features.shape[0]\n # add a bias term\n features = np.hstack((features, np.ones(n_data).reshape(-1, 1)))\n # expand 1 dimension to labels\n labels = labels[:, None]\n # scaler 1: scale features to zero-mean unit-var\n scaler = StandardScaler()\n scaler.fit(features)\n features = 
scaler.transform(features)\n # scaler 2: scale labels to [0, 1] by min-max\n scaler = MinMaxScaler()\n scaler.fit(labels)\n labels = scaler.transform(labels).squeeze()\n return features, labels \n\ndef load_mnist():\n \"\"\"Mnist is already scaled\"\"\"\n # load_data\n features, labels = load_svmlight_file('./data/mnist_1_7.scale')\n features = np.asarray(features.todense())\n n_data = features.shape[0]\n # add a bias term\n features = np.hstack((features, np.ones(n_data).reshape(-1, 1)))\n # make 7 as negative class\n labels[labels == 7] = -1\n return features, labels \n\ndef load_a9a():\n files = ['./data/a9a', './data/a9a.t']\n features = [None, None]\n labels = [None, None]\n for i in range(2):\n # load_data\n feature, label = load_svmlight_file(files[i], n_features=123)\n feature = np.asarray(feature.todense())\n n_data = feature.shape[0]\n features[i] = feature\n labels[i] = label\n # concatenate\n print(features[0].shape, features[1].shape)\n features = np.concatenate(features, axis=0)\n labels = np.concatenate(labels, axis=0)\n # scale features to zero-mean unit-var\n scaler = StandardScaler()\n scaler.fit(features)\n features = scaler.transform(features)\n pos = np.sum(labels == 1)\n neg = np.sum(labels == -1)\n return features, labels\n\ndef load_rcv1():\n # load_data\n feature, label = load_svmlight_file('./data/rcv1_train.binary')\n feature = np.asarray(feature.todense())\n n_data = feature.shape[0]\n # scale features to zero-mean unit-var\n \"\"\"\n scaler = StandardScaler()\n scaler.fit(feature)\n feature = scaler.transform(feature)\n pos = np.sum(label == 1)\n neg = np.sum(label == -1)\n print(f\"{pos}, {neg}\")\n \"\"\"\n return feature[:10000] * 10, label[:10000]\n\ndef load_e2006():\n # laod data\n feature_tr, label_tr, feature_te, label_te = load_svmlight_files(['./data/E2006.train', \\\n './data/E2006.test'], n_features=150360)\n feature = vstack([feature_tr, feature_te])\n # expand 1 dimension to labels\n label = np.concatenate([label_tr, label_te], axis=0)\n # remove outliers from labels\n std_y = np.std(label)\n mean_y = np.mean(label)\n mask = np.logical_and(label > mean_y - 3.0 * std_y, label < mean_y + 3.0 * std_y)\n print(f'keep {np.sum(mask)} / {len(mask)} rows')\n # select rows\n feature = feature[mask]\n label = label[mask]\n\n # scale labels by standard scaler\n label = label[:, None]\n scaler = MinMaxScaler()\n scaler.fit(label)\n label = scaler.transform(label).squeeze()\n return feature * 10, label\n","repo_name":"xuanqing94/AdvSSL","sub_path":"utils/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"28975299431","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser,Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.timezone import now\n\nclass User(AbstractUser):\n\n mobile = models.CharField(max_length=40,blank=True,null=True,verbose_name='Celular')\n imagen_perfil = models.ImageField(upload_to = 'imagenes/',default='imagenes/no_image.png',verbose_name='Foto de Perfil',blank=False)\n descripcion = models.TextField(verbose_name='Descripción',default='',blank=False)\n fecha_nacimiento = models.DateField(verbose_name='Fecha Nacimiento',default=now)\n\n def __str__(self):\n return self.email\n\n @property\n def get_absolute_detail_url(self):\n from django.urls import reverse_lazy\n return reverse_lazy('core:detail', args=[str(self.id)])\n\n 
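    # The get_absolute_*_url properties lazily resolve named 'core:*' routes for
    # this user; the function-level `from django.urls import reverse_lazy` runs
    # on every call, so a single module-level import would be the more idiomatic
    # choice (behaviour is the same either way).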
@property\n def get_absolute_edit_url(self):\n from django.urls import reverse_lazy\n return reverse_lazy('core:update', args=[str(self.id)])\n\n @property\n def get_absolute_delete_url(self):\n from django.urls import reverse_lazy\n return reverse_lazy('core:delete', args=[str(self.id)])\n\n def __str__(self):\n return self.get_full_name()\n\nUser._meta.get_field('username').verbose_name = 'Nombre De Usuario'\nUser._meta.get_field('username').help_text = 'Sin espacios ni caracteres especiales'\nUser._meta.get_field('username').error_messages = {\n 'blank' : 'El Campo No Puede Estar En Blanco',\n 'invalid' : 'El Valor No Es Valido',\n 'invalid_choice' : 'Opcion No Valida',\n 'unique' : 'El Usuario Debe Ser Unico'\n}\n\nUser._meta.get_field('email').verbose_name = 'Correo Electronico'\nUser._meta.get_field('first_name').verbose_name = 'Nombre'\nUser._meta.get_field('first_name').blank = False\nUser._meta.get_field('last_name').verbose_name = 'Apellido'\nUser._meta.get_field('last_name').blank = False\nUser._meta.get_field('is_active').verbose_name = 'Activo'\nUser._meta.get_field('is_active').help_text = 'Desactiva el acceso al usuario a las caracteristicas del sistema'","repo_name":"jdmazuera/timeline","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35399019514","text":"# Modelo=[(anúmero, nome, curso, nota, nota, nota, nota)]\r\n\r\nimport csv\r\n\r\n# (1) Crie uma função que lê a informação do ficheiro para um modelo, previamente pensado em memória.\r\ndef le(filename):\r\n file=open(filename, encoding='UTF8') #Mapear o texto para os bytes. Algoritmo de encoding específico.\r\n\r\n csv_file = csv.reader(file, delimiter=',')\r\n file.readline()\r\n\r\n lista=[]\r\n for aluno in csv_file:\r\n lista.append(tuple(aluno))\r\n \r\n\r\n return lista\r\n\r\ndef Imprime(Turma): #Com obras=[(nome, descrição, ano de criação, período, compositor, duração, id)]\r\n print(f\" | {'anúmero':^10} | {'Nome':^25} | {'Curso':^10} | {'Nota1':^10} | {'Nota2':^10} | {'Nota3':^10} | {'Nota4':^10} |\")\r\n\r\n for anum, nome, curso, nota1, nota2, nota3, nota4 in Turma:\r\n \r\n print(f\" | {anum[:10]:^10} | {nome[:25]:^25} | {curso[:10]:^10} | {nota1[:10]:^10} | {nota2[:10]:^10} | {nota3[:10]:^10} | {nota4[:10]:^10} |\")\r\n\r\n# (2) Crie uma função que calcula a distribuição dos alunos por curso.\r\n\r\ndef distCurso(Turma):\r\n dictcursos={}\r\n\r\n for _, nome, curso, *_ in Turma: \r\n if curso in dictcursos.keys():\r\n dictcursos[curso] = dictcursos[curso] + 1\r\n else: \r\n dictcursos[curso] = 1\r\n\r\n \r\n return dictcursos\r\n\r\n# (3) Crie uma função que calcula a média das notas de cada aluno e acrescenta essa nova coluna no dataset em memória.\r\n\r\ndef media(Turma):\r\n dictmedias={}\r\n\r\n for _, nome, _, nota1, nota2, nota3, nota4 in Turma:\r\n media = (int(nota1) + int(nota2) + int(nota3) + int(nota4))/4\r\n if nome not in dictmedias.keys():\r\n dictmedias[nome] = float(media)\r\n \r\n return dictmedias\r\n\r\n# (4) Considere os seguintes escalões de notas: E [1-4], D [5-8], C [9-12], B [13-16], A [17-20], acrescente uma coluna ao dataset com o escalão correspondente a cada aluno.\r\n\r\n# (4.1) Cria uma lista com a média do aluno respetivo. 
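# Illustrative helper, not part of the original file: maps a média straight to
# the escalão letter from the table in step (4) (E [1-4] ... A [17-20]), using
# the same 4.99/8.99/12.99/16.99 boundaries the dictmedias() function below
# attempts to apply.
def escalao(media):
    if media <= 4.99:
        return 'E'
    elif media <= 8.99:
        return 'D'
    elif media <= 12.99:
        return 'C'
    elif media <= 16.99:
        return 'B'
    return 'A'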
\r\ndef notas(Turma):\r\n dictescaloes={}\r\n\r\n for _, nome, _, nota1, nota2, nota3, nota4 in Turma: \r\n if nome not in dictescaloes.keys():\r\n dictescaloes[nome] = [(int(nota1) + int(nota2) + int(nota3) + int(nota4))/4]\r\n\r\n return dictescaloes\r\n\r\n# (4)\r\n\r\ndef dictmedias(Turma):\r\n a=media(Turma)\r\n \r\n escalaoA = []\r\n escalaoB = []\r\n escalaoC = []\r\n escalaoD = []\r\n escalaoE = []\r\n\r\n lista=[]\r\n\r\n\r\n for aluno in a.keys():\r\n\r\n if (float(a.values) >= 1 and float(a.values) <= 4.99):\r\n escalaoE.append(aluno)\r\n\r\n elif (float(a.values) >= 5 and float(a.values) <= 8.99):\r\n escalaoD.append(aluno)\r\n\r\n if (float(a.values) >= 9 and float(a.values) <= 12.99):\r\n escalaoC.append(aluno)\r\n\r\n elif (float(a.values) >= 13 and float(a.values) <= 16.99):\r\n escalaoB.append(aluno)\r\n\r\n if (float(a.values) >= 17 and float(a.values) <= 20):\r\n escalaoA.append(aluno)\r\n \r\n b = (aluno, a.values(aluno))\r\n \r\n return b\r\n\r\n# (5) Crie uma distribuição dos alunos por escalão.\r\n\r\n\r\n\r\n# (6) Crie uma função que apresenta na forma dum gráfico de linha uma distribuição.\r\n\r\n\r\n\r\n# (7) Crie uma função que imprime na forma de uma tabela uma distribuição.\r\n\r\n\r\n\r\n# (8) Especifique um programa que, ciclicamente, apresenta um menu com todas funcionalidades ao utilizador.\r\n\r\n\r\n\r\n\r\n\r\n\r\n ","repo_name":"a97100Uminho/ATP2022","sub_path":"TPC7/TP7.py","file_name":"TP7.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41638872554","text":"from jour02.job01.Auteur import Auteur\nfrom jour02.job02.Client import Client\nfrom jour02.job01.Livre import Livre\n\n\nclass Bilbiotheque:\n\n def __init__(self, nom: str, catalogue: dict = { Livre: int }):\n self.nom = nom\n self.catalogue = catalogue\n\n def acheter_livre(self, auteur: Auteur, nom_livre: Livre, quantite: int):\n if nom_livre in auteur.oeuvre:\n self.catalogue[nom_livre] += quantite\n\n def inventaire(self):\n for livre in self.catalogue:\n print(\"___________________________________\")\n print(\"Livre :\", livre.titre)\n print(\"quantité :\", self.catalogue[livre])\n\n def louer(self, client: Client, titre_livre: str):\n if self.catalogue[titre_livre] > 0:\n client.collection[titre_livre] = 1\n self.catalogue[titre_livre] -= 1\n\n def rendre_livres(self, client):\n for livre in client.collection:\n self.catalogue[livre] += 1\n client.collection = {}\n\n\nauteur1 = Auteur(\"Ecrivain\", \"Jean\")\nauteur2 = Auteur(\"Palmade\", \"Pierre\")\nauteur1.ecrit_un_livre(\"LIVRE 1\")\nauteur1.ecrit_un_livre(\"LIVRE 2\")\nauteur2.ecrit_un_livre(\"Soirée entre amis\")\n\nbook_shop = Bilbiotheque(\"Alcazar\", {auteur1.oeuvre[0]: 5, auteur1.oeuvre[1]: 3, auteur2.oeuvre[0]: 55})\n\nbook_shop.acheter_livre(auteur1, auteur1.oeuvre[1], 7)\n\nclient1 = Client(\"Pigeon\", \"Timothé\")\nbook_shop.louer(client1, auteur2.oeuvre[0])\nbook_shop.louer(client1, auteur1.oeuvre[1])\nbook_shop.rendre_livres(client1)\n\nbook_shop.inventaire()\n","repo_name":"lucas-verdier/runtrack-python","sub_path":"jour02/job02/Bilbiotheque.py","file_name":"Bilbiotheque.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"689527276","text":"import sys\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math \n\ndef calculateDistance(x1,y1,x2,y2): \n dist = round((math.sqrt((x2 - x1)**2 + (y2 
- y1)**2)),5) \n return dist\n\ntry:\n num_points = int(sys.argv[1])\nexcept:\n num_points = 50\n\n#create requested number of points\npoint_arr = []\nfor k in range(num_points):\n p = [random.randint(0,128),random.randint(0,128)]\n while p in point_arr:\n p = [random.randint(0,128),random.randint(0,128)]\n point_arr.append(p)\n \n#print a cartisian plot\n#for a in point_arr:\n# plt.scatter(a[0], a[1])\n#plt.show()\n\n#create empty matrix nxn\nn = len(point_arr)\nmatrix = [[0 for i in range(n)] for j in range(n)]\n#print (np.array(matrix))\n\n# create the complete adjaceny matrix with the weights as strings and no point connected to itself \nposx = 0\nfor pi in point_arr:\n posy = 0\n for q in point_arr: \n matrix[posx][posy]=(calculateDistance(pi[0],pi[1],q[0],q[1]))\n posy+=1\n posx+=1\n# No vertex connects to itself\nfor i in range(len(point_arr)):\n matrix[i][i] = \"x\"\n#print(np.array(matrix))\n\nfilename = str(num_points)+\"CompleteAdj.in\"\n\nf= open(filename,\"w+\")\nfor q in range(5):\n f.write(\"XXXXXX\\n\")\n\n#write adjacency matrix\nfor y in range(len(matrix)):\n for x in range(len(matrix[y])):\n f.write((str(matrix[y][x])+\" \"))\n f.write(\"\\n\")\nf.close() \n","repo_name":"raymondbacco/170Project","sub_path":"genCompleteRandom.py","file_name":"genCompleteRandom.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18309527447","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-#\n'''\nauthor: -- shidegang --\nCreated Time: 2019-08-28 10:43:17\n'''\n\n# 对象中添加一个成员,注意是在对象中添加,类并不变\nclass Foo():\n def __init__(self,name,age):\n self.name = name\n self.age = age\n\n def show(self):\n return '{} : {}'.format(self.name,self.age)\nobj = Foo('tom',20)\n\nsetattr(obj,'name1','joy')\n\nv = getattr(obj,'name1')\n\nprint(v)\n","repo_name":"shidg/note","sub_path":"python/study/OOP/反射-setattr.py","file_name":"反射-setattr.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"30516387153","text":"\"\"\"\nThis package provides an API for relational databases.\n\"\"\"\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import sessionmaker\n\nfrom pacific.db.repository import repository_config\nfrom pacific.db.repository import add_repository\n\n\n__all__ = ['repository_config', 'get_session_factories']\n\n\ndef includeme(config):\n \"\"\" Pyramid configuration entry point.\n Call me before using any of the SQL Sessions.\n\n :param config: Pyramid configurator instance\n :type config: :class:`pyramid.config.Configurator`\n \"\"\"\n config.registry.settings['pacific.db.session_factories'] = get_session_factories(config.registry.settings)\n\n config.add_request_method(request_db, 'db', reify=True)\n # Add a directive that is capable of registering project repositories\n config.add_directive('add_repository', add_repository)\n\n\ndef get_session_factories(settings, options_prefix='pacific.db.'):\n \"\"\"\n\n :param settings:\n :type settings: dict\n :param options_prefix:\n :type options_prefix: str\n :return: dict of {domain => {shard => sessionmaker()}} session factories\n :rtype: dict\n \"\"\"\n session_factories = {}\n for key, value in settings.items():\n if not key.startswith(options_prefix):\n continue\n\n key = key.split(options_prefix)[1]\n domain, shard = key.split('.')\n url = value\n engine = sa.create_engine(url, encoding='utf-8',\n # -- pool options --\n pool_size=10,\n 
max_overflow=10,\n pool_timeout=10)\n shard_sessions = session_factories.setdefault(domain, {})\n shard_sessions[shard] = sessionmaker(bind=engine, autocommit=False)\n return session_factories\n\n\ndef request_db(request):\n \"\"\"\n\n :param request: Pyramid Request instance\n :type request: :class:`pyramid.request.Request`\n :return: an instance of :class:`RequestDbApi`\n :rtype: :class:`pacific.db.RequestDbApi`\n \"\"\"\n request.add_finished_callback(lambda request: request.db.discard())\n return RequestDbApi(request)\n\n\nclass RequestDbApi(object):\n \"\"\" An instance of this class is used as ``request.db`` attribute.\n \"\"\"\n def __init__(self, request):\n \"\"\"\n :param request: Pyramid Request instance\n :type request: :class:`pyramid.request.Request`\n \"\"\"\n registry_settings = request.registry.settings\n self.repositories = registry_settings['pacific.db.repositories']\n self.session_factories = registry_settings['pacific.db.session_factories']\n\n self.sessions = {}\n self.repository_instances = {}\n\n\n def get_repository(self, name):\n \"\"\" Returns an instance of a Repository object.\n\n :param name: repository name\n :type name: str\n :return: repository instance\n \"\"\"\n try:\n repository_instance = self.repository_instances[name]\n except KeyError:\n repository_conf = self.repositories[name]\n session_instance = self.get_session(repository_conf['namespace'], repository_conf['shard'])\n repository_instance = repository_conf['repository'](session_instance)\n self.repository_instances[name] = repository_instance\n return repository_instance\n\n def get_session(self, namespace, shard='default'):\n \"\"\" Returns a SQLAlchemy Session instance according to the given namespace and shard.\n\n :param namespace: namespace name according to Pacific config.\n :type namespace: str\n :param shard: one of the namespace shards. 
Shard 'default' is required to be set up\n                     in the config.\n        :type shard: str\n        :return: SQLAlchemy's Session instance.\n        :rtype: :class:`sqlalchemy.orm.session.Session`\n        \"\"\"\n        key = '{namespace}:{shard}'.format(namespace=namespace, shard=shard)\n        try:\n            # find existing session instance\n            session_instance = self.sessions[key]\n        except KeyError:\n            # start a new session\n            session_instance = self.session_factories[namespace][shard]()\n            self.sessions[key] = session_instance\n        return session_instance\n\n    def discard(self):\n        \"\"\"Close all sessions and return connections to the pool.\"\"\"\n        for sess in self.sessions.values():\n            sess.close()\n","repo_name":"connectthefuture/Pacific","sub_path":"pacific/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"32838505276","text":"\nfrom flask import Flask,make_response, json, request, session, jsonify\nfrom google.cloud.sql.connector import Connector, IPTypes\nfrom sqlalchemy import create_engine, text\n\nimport os\nimport pg8000\nimport pymysql\nimport sqlalchemy\n\n\napp = Flask(__name__)\n\n@app.route(\"/index\")\ndef main():\n    return \"Welcome!\"\n\n\ndef connect_with_connector() -> sqlalchemy.engine.Engine:\n    instance_connection_name = \"strategic-lens-399220:us-central1:build-pulse\"\n    db_user = \"postgres\"\n    db_pass = \"buildpulse\"\n    db_name = \"BuildPulse\"\n    ip_type = IPTypes.PUBLIC\n\n    # Initialize Cloud SQL Python Connector object\n    connector = Connector()\n\n    def getconn():\n        conn = connector.connect(\n            instance_connection_name,\n            \"pg8000\",\n            user=db_user,\n            password=db_pass,\n            db=db_name,\n            ip_type=ip_type,\n        )\n        return conn\n\n    # Create a SQLAlchemy engine using the 'creator' argument\n    engine = create_engine(\n        \"postgresql+pg8000://\",\n        creator=getconn,\n    )\n    return engine\n\n\n\n@app.route(\"/repairs\", methods=['GET'])\ndef repairs():\n    try:\n        engine = connect_with_connector()\n        conn = engine.connect()\n        asset_type = request.args.get('asset_type')\n        manufacturer = request.args.get('manufacturer')\n        start_date = request.args.get('start_date')\n        end_date = request.args.get('end_date')\n        # NOTE: the combined \"All\"/\"All\" case must be tested first, otherwise the\n        # single-filter branches shadow it. Interpolating request values into SQL\n        # with str.format is also injection-prone; bound parameters would be safer.\n        if (asset_type == \"All\" and manufacturer == \"All\"):\n            sql = text(\"select sum(s.repairs),b.zip_code from manufacturer as m,service s,asset as a,building as b,resource as r where a.asset_id=r.asset_id and r.resource_id=s.resource_id and s.manufacturer_id=m.manufacturer_id and r.building_id=b.building_id and last_service_date between '{}' and '{}' group by b.zip_code;\".format(\n                start_date, end_date))\n        elif (asset_type == \"All\"):\n            sql = text(\"select sum(s.repairs),b.zip_code from manufacturer as m,service s,asset as a,building as b,resource as r where a.asset_id=r.asset_id and r.resource_id=s.resource_id and s.manufacturer_id=m.manufacturer_id and r.building_id=b.building_id and m.name= '{}' and last_service_date between '{}' and '{}' group by b.zip_code;\".format(\n                manufacturer, start_date, end_date))\n        elif (manufacturer == \"All\"):\n            sql = text(\"select sum(s.repairs),b.zip_code from manufacturer as m,service s,asset as a,building as b,resource as r where a.asset_id=r.asset_id and r.resource_id=s.resource_id and s.manufacturer_id=m.manufacturer_id and r.building_id=b.building_id and a.asset_type = '{}' and last_service_date between '{}' and '{}' group by b.zip_code;\".format(\n                asset_type, start_date, end_date))\n        else:\n            sql = text(\"select sum(s.repairs),b.zip_code from manufacturer as m,service 
s,asset as a,building as b,resource as r where a.asset_id=r.asset_id and r.resource_id=s.resource_id and s.manufacturer_id=m.manufacturer_id and r.building_id=b.building_id and m.name= '{}' and a.asset_type = '{}' and last_service_date between '{}' and '{}' group by b.zip_code;\".format(\n manufacturer, asset_type, start_date, end_date))\n\n result = conn.execute(sql).fetchall()\n for i in range(len(result)):\n result[i] = list(result[i])\n return result\n\n except Exception as err:\n print(err)\n\n@app.route(\"/assets\",methods=['GET'])\ndef select_rows():\n try:\n engine = connect_with_connector()\n query = text(\"SELECT * FROM asset;\")\n conn = engine.connect()\n result = conn.execute(query).fetchall()\n for i in range(len(result)):\n result[i] = list(result[i])\n return result\n except Exception as err:\n print(err)\n\n@app.route(\"/count\", methods=['GET'])\ndef count():\n try:\n sql = text(\"select count(s.repairs) as number_of_repairs, a.asset_type from asset as a, service as s, resource as r where a.asset_id=r.asset_id and s.resource_id= r.resource_id and s.repairs>0 group by a.asset_type\")\n engine = connect_with_connector()\n conn = engine.connect()\n result = conn.execute(sql).fetchall()\n for i in range(len(result)):\n result[i] = list(result[i])\n print(result)\n return result\n except Exception as err:\n print(err)\n\n\n@app.route(\"/avg_cost\", methods=['GET'])\ndef avg_cost():\n try:\n sql = text(\"select sum(s.avg_cost), a.asset_type from asset as a, service as s, resource as r where a.asset_id=r.asset_id and s.resource_id= r.resource_id group by a.asset_type;\")\n engine = connect_with_connector()\n conn = engine.connect()\n result = conn.execute(sql).fetchall()\n for i in range(len(result)):\n result[i] = list(result[i])\n return result\n\n except Exception as err:\n print(err)\n\nif __name__ == \"__main__\":\n # conn= getconn()\n app.run(host=\"127.0.0.1\", port=8002)\n","repo_name":"prbln/HACKSMU5","sub_path":"BackEnd/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8089926929","text":"from imageai.Classification import ImageClassification\nimport os\n\nexec_path = os.getcwd()\n\nprediction = ImageClassification()\n\nprediction.setModelTypeAsMobileNetV2()\nprediction.setModelPath(os.path.join(exec_path, 'mobilenet_v2-b0353104.pth'))\nprediction.loadModel()\n\npredctions, probabilities = prediction.classifyImage(\n os.path.join(exec_path, 'butterfly.jpg'), result_count=5)\nfor eachPred, eachProb in zip(predctions, probabilities):\n print(f'{eachPred} : {eachProb}')\n","repo_name":"kurofavo/image_recognizer","sub_path":"brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42157475051","text":"import gym\nimport sys\nsys.path.append('C:/Users/xuwei1/Documents/baselines')\nfrom baselines import deepq\nimport time\n\ndef main():\n env = gym.make('Catcher-v0')\n model = deepq.models.mlp([64])\n act = deepq.learn(\n env,\n q_func=model,\n lr=1e-3,\n max_timesteps=100000,\n buffer_size=50000,\n exploration_fraction=0.5,\n exploration_final_eps=0,\n print_freq=10,\n batch_size=32,\n \n )\n print(\"Saving model to catcher_model.pkl\")\n act.save(\"model/catcher.pkl\")\n\ndef test():\n env = gym.make('Catcher-v0')\n state = env.reset()\n print(state.shape)\n for _ in range(10):\n \tenv.render()\n \ttime.sleep(0.5)\n 
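 	# [Editor's note] The unpacking on the next line assumes the classic Gym
 	# (pre-0.26) step API, which returns a 4-tuple. A hedged sketch of both conventions:
 	#     state, reward, done, info = env.step(action)                  # classic gym
 	#     obs, reward, terminated, truncated, info = env.step(action)   # gymnasium
 	# Under gymnasium, `done` corresponds to `terminated or truncated`.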
\tstate, reward, done, _ = env.step(env.action_space.sample()) # take a random action\n \tprint((state.shape,reward,done))\n \tif done:\n \t\tbreak\n\nif __name__ == '__main__':\n    main()","repo_name":"PlusWayne/catcher","sub_path":"train_catcher.py","file_name":"train_catcher.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"27135307242","text":"from typing import (\n    List,\n)\n\nclass Solution:\n    \"\"\"\n    @param candidates: A list of integers\n    @param target: An integer\n    @return: A list of lists of integers\n             we will sort your return value in output\n    \"\"\"\n    def combination_sum(self, candidates: List[int], target: int) -> List[List[int]]:\n        # write your code here\n        # each number may be chosen multiple times,\n        # so deduplicate first, then sort\n        nums = sorted(list(set(candidates)))\n\n        result = []\n        self.dfs(nums, 0, [], result, target)\n        return result\n\n    def dfs(self, nums, start_index, curr_subset, result, target):\n        # base case\n        if sum(curr_subset) == target:\n            # the current subset sums to target, so record it\n            result.append(curr_subset.copy()) # append a copy, not the list itself\n            return\n\n        for i in range(start_index, len(nums)):\n            # prune: nums is sorted, so if adding nums[i] overshoots target,\n            # adding any nums[j > i] overshoots as well\n            if sum(curr_subset) + nums[i] > target:\n                break\n            # numbers may be reused, so the recursion starts from i again\n            self.dfs(nums, i, curr_subset + [nums[i]], result, target)\n\n\n","repo_name":"ytatus94/Leetcode","sub_path":"lintcode/lintcode_0135_Combination_Sum.py","file_name":"lintcode_0135_Combination_Sum.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"72216941386","text":"import h5py\nimport numpy as np\nfrom mountainlab_pytools import mdaio\n\nclass TimeseriesMdaReader:\n    def __init__(self,path,*,samplerate):\n        self._samplerate=samplerate\n        self._mda_path=path\n        X=mdaio.DiskReadMda(path)\n        self._num_channels=X.N1()\n        self._num_timepoints=X.N2()\n    def numChannels(self):\n        return self._num_channels\n    def numTimepoints(self):\n        return self._num_timepoints\n    def sampleRate(self):\n        return self._samplerate\n    def getChunk(self,*,trange=None,channels=None):\n        if not channels:\n            channels=range(1,self._num_channels+1)\n        if not trange:\n            trange=[0,self._num_timepoints]\n        X=mdaio.DiskReadMda(self._mda_path)\n        chunk=X.readChunk(i1=0,i2=trange[0],N1=self._num_channels,N2=trange[1]-trange[0])\n        return chunk[np.array(channels)-1,:]\n\nclass TimeseriesHdf5Reader:\n    def __init__(self,path):\n        self._hdf5_path=path\n        with h5py.File(self._hdf5_path,\"r\") as f:\n            self._num_chunks=f.attrs['num_chunks']\n            self._chunk_size=f.attrs['chunk_size']\n            self._padding=f.attrs['padding']\n            self._num_channels=f.attrs['num_channels']\n            self._num_timepoints=f.attrs['num_timepoints']\n            self._samplerate=f.attrs['samplerate']\n    def numChannels(self):\n        return self._num_channels\n    def numTimepoints(self):\n        return self._num_timepoints\n    def sampleRate(self):\n        return self._samplerate\n    def getChunk(self,*,trange=None,channels=None):\n        if not channels:\n            channels=range(1,self._num_channels+1)\n        if not trange:\n            trange=[0,self._num_timepoints]\n        t1=trange[0]\n        t2=trange[1]\n        if (t1<0) or (t2>self.numTimepoints()):\n            ret=np.zeros((len(channels),t2-t1))\n            t1a=np.maximum(t1,0)\n            t2a=np.minimum(t2,self.numTimepoints())\n            ret[:,t1a-(t1):t2a-(t1)]=self.getChunk(trange=[t1a,t2a],channels=channels)\n            return ret\n        else:\n            c1=int(t1/self._chunk_size)\n            c2=int((t2-1)/self._chunk_size)\n            ret=np.zeros((len(channels),t2-t1))\n            with 
h5py.File(self._hdf5_path,\"r\") as f:\n for cc in range(c1,c2+1):\n if cc==c1:\n t1a=t1\n else:\n t1a=self._chunk_size*cc\n if cc==c2:\n t2a=t2\n else:\n t2a=self._chunk_size*(cc+1)\n for ii in range(len(channels)):\n m=channels[ii]\n assert(cc>=0)\n assert(cc int:\n if not root:\n return 0\n total_sum = 0\n\n stack = deque()\n stack.append((root, 0))\n while stack:\n node, curr_sum = stack.pop()\n curr_sum = curr_sum * 10 + int(node.val)\n\n if not node.left and not node.right:\n total_sum += curr_sum\n \n if node.right:\n stack.append((node.right, curr_sum))\n if node.left:\n stack.append((node.left, curr_sum))\n return total_sum\n\n def sumNumbersRecursive(self, root: Optional[TreeNode]) -> int:\n if not root:\n return 0\n \n total_sum = 0\n \n def dfs(node: TreeNode, curr_sum: int):\n nonlocal total_sum\n curr_sum = curr_sum * 10 + int(node.val)\n if not node.right and not node.left:\n total_sum += curr_sum\n return\n \n if node.left:\n dfs(node.left, curr_sum)\n \n if node.right:\n dfs(node.right, curr_sum)\n \n dfs(root, 0)\n \n return total_sum","repo_name":"vaiol/leetcode2","sub_path":"src/129/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33343098168","text":"import numpy as np\n\nfrom typing import List, Optional\n\nfrom deeplake.core.vectorstore.vector_search.indra import tql_distance_metrics\n\n\ndef create_query_string(\n distance_metric: Optional[str],\n tql_filter: str,\n limit: int,\n order: Optional[str],\n tensor_list: List[str],\n):\n \"\"\"Function for creating a query string from a distance metric, limit and order.\n\n Args:\n distance_metric (str): distance metric to compute similarity of the query embedding with dataset's embeddings.\n tql_filter (str): Additional filter using TQL syntax.\n limit (int): number of samples to return after the search.\n order (str): Type of data ordering after computing similarity score. 
Defaults to \"ASC\".\n tensor_list (List[str]): List of tensors to return data for.\n\n\n Returns:\n str: TQL representation of the query string.\n \"\"\"\n\n # TODO: BRING THIS BACK AND DELETE IMPLEMENTATION BELOW\n # tql_filter_str = tql_filter if tql_filter == \"\" else \" where \" + tql_filter\n # tensor_list_str = \", \".join(tensor_list)\n # order_str = \"\" if order is None else f\" order by score {order}\"\n # distance_metric_str = (\n # \"\" if distance_metric is None else f\", {distance_metric} as score\"\n # )\n\n # return f\"select * from (select {tensor_list_str}{distance_metric_str}{tql_filter_str}){order_str} limit {limit}\"\n\n ## TODO: DELETE IMPLEMENTATION BELOW AND BRING BACK IMPLEMENTATION ABOVE\n\n tql_filter_str = tql_filter if tql_filter == \"\" else \" where \" + tql_filter\n tensor_list_str = \", \".join(tensor_list)\n distance_metric_str = (\n \"\" if distance_metric is None else f\", {distance_metric} as score\"\n )\n\n order_str = \"\" if order is None else f\" order by {distance_metric} {order}\"\n score_str = \"\" if order is None else f\", score\"\n\n return f\"select {tensor_list_str}{score_str} from (select *{distance_metric_str}{tql_filter_str}{order_str} limit {limit})\"\n\n\ndef create_query(\n distance_metric: str,\n embedding_tensor: str,\n query_embedding: str,\n tql_filter: str,\n limit: int,\n tensor_list: List[str],\n):\n \"\"\"Function for creating a query string from a distance metric, embeddings, query_embedding, and limit.\n\n Args:\n distance_metric (str): distance metric to compute similarity of the query embedding with dataset's embeddings.\n embedding_tensor (str): name of the tensor in the dataset with ``htype = \"embedding\"``.\n query_embedding (str): embedding representation of the query string converted to str.\n tql_filter (str): Additional filter using TQL syntax.\n limit (int): number of samples to return after the search.\n tensor_list (List[str]): List of tensors to return data for.\n\n\n Returns:\n str: TQL representation of the query string.\n \"\"\"\n\n order = tql_distance_metrics.get_order_type_for_distance_metric(distance_metric)\n tql_distrance_metric = tql_distance_metrics.get_tql_distance_metric(\n distance_metric, embedding_tensor, query_embedding\n )\n\n query = create_query_string(\n tql_distrance_metric, tql_filter, limit, order, tensor_list\n )\n return query\n\n\ndef convert_tensor_to_str(query_embedding: np.ndarray):\n \"\"\"Function for converting a query embedding to a string\n\n We need to convert tensor to a string to be able to use tql\n with the query embedding. Here we will assume that query_embedding\n is always 2D and first dimension is always 1. 
At some point the\n logic should be extended to support queries of different dimensions.\n\n Args:\n query_embedding (Union[List[float], np.ndarray]) - embedding representation of the query string.\n \"\"\"\n if len(query_embedding.shape) > 1:\n query_embedding = query_embedding.transpose(1, 0)\n query_embedding = query_embedding[:, 0]\n\n query_embedding_str = \"\"\n\n for item in query_embedding:\n query_embedding_str += f\"{item}, \"\n\n return f\"ARRAY[{query_embedding_str[:-2]}]\"\n\n\ndef parse_query(\n distance_metric: str,\n limit: int,\n query_embedding: np.ndarray,\n embedding_tensor: str,\n tql_filter: str,\n tensor_list: List[str],\n) -> str:\n \"\"\"Function for converting query_embedding into tql query.\n\n Args:\n distance_metric (str): distance metric to compute similarity of the query embedding with dataset's embeddings.\n embedding_tensor (str): name of the tensor in the dataset with `htype = \"embedding\"`.\n query_embedding (np.ndarray]): embedding representation of the query string.\n limit (int): number of samples to return after the search.\n tql_filter (str): Additional filter using TQL syntax.\n tensor_list (list[str]): List of tensors to return data for.\n\n\n Returns:\n str: converted tql query string.\n \"\"\"\n if query_embedding is None:\n return create_query_string(None, tql_filter, limit, None, tensor_list)\n\n else:\n query_embedding_str = convert_tensor_to_str(query_embedding)\n\n return create_query(\n distance_metric,\n embedding_tensor,\n query_embedding_str,\n tql_filter,\n limit,\n tensor_list,\n )\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/core/vectorstore/vector_search/indra/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"} +{"seq_id":"14747940625","text":"from django.conf.urls import url\nfrom . 
import views\nfrom django.urls import path\n\napp_name = 'LMLAdmin'\nurlpatterns = [\n path('',views.home,name='home'),\n path('account/',views.useradminaccount,name='useradminaccount'),\n path('messages/employee/',views.employee_messages ,name='E_messages'),\n path('messages/random/',views.random_messages ,name='R_messages'),\n path('messages/company/',views.company_messages, name='C_messages'),\n path('allcandidates/',views.employees, name='employees'),\n path('candidateeedetails/',views.employeesdetails, name='employeesdetails'),\n path('premiumemployees/',views.premiumemployees, name='premiumemployees'),\n path('basicemployees/',views.basicemployees, name='basicemployees'),\n path('ultimateemployees/',views.ultimateemployees, name='ultimateemployees'),\n path('shortlistedemployees/',views.shortlistedemployees, name='shortlistedemployees'),\n path('allshortlistedemployeeshistory/',views.allshortlistedemployeeshistory, name='allshortlistedemployeeshistory'),\n path('registeredemployees/',views.registeredemployees, name='registeredemployees'),\n path('unregipayedemployees/',views.unregipayedemployees, name='unregipayedemployees'),\n path('deactivatedemployees/',views.deactivatedemployees, name='deactivatedemployees'),\n\n\n path('carouselImages/',views.carouselImages, name='carouselImages'),\n path('carouselImagesDelete//',views.carouselImagesDelete, name='carouselImagesDelete'),\n\n\n path('allcompanies/',views.companies, name='companies'),\n path('companydetails//',views.companydetails, name='companydetails'),\n path('allpremiumcompanies/',views.premiumcompanies, name='premiumcompanies'),\n path('allplatinumcompanies/',views.platinumcompanies, name='platinumcompanies'),\n path('allbasiccompanies/',views.basiccompanies, name='basiccompanies'),\n path('allproultimatecompanies/',views.proultimatecompanies, name='proultimatecompanies'),\n path('allprobasiccompanies/',views.probasiccompanies, name='probasiccompanies'),\n path('allultimatecompanies/',views.ultimatecompanies, name='ultimatecompanies'),\n path('allundefinedcompanies/',views.undefinedcompanies, name='undefinedcompanies'),\n path('allcompaniesregpayment/',views.companiesregpayment, name='companiesregpayment'),\n path('allcompaniesregunpayment/',views.companiesregunpayment, name='companiesregunpayment'),\n path('alldeactivatedemployers/',views.deactivatedemployers, name='deactivatedemployers'),\n\n\n\n path('companyPricing/',views.companyPricing, name='companyPricing'),\n path('addcompanyPricing/',views.addcompanyPricing, name='addcompanyPricing'),\n path('deletecompanyPricing/',views.deletecompanyPricing, name='deletecompanyPricing'),\n path('deleteallcompanyPricing/',views.deleteallcompanyPricing, name='deleteallcompanyPricing'),\n path('editcompanyPricing/',views.editcompanyPricing, name='editcompanyPricing'),\n path('allcategories/',views.categories, name='categories'),\n\n # graph\n path('customer_graph/', views.customer_graph, name='customer_graph'),\n path('registration_graph/', views.registration_graph, name='registration_graph'),\n path('registration_graph_time/', views.registration_graph_time, name='registration_graph_time'),\n path('companystatuspaymentgraph/', views.companystatuspaymentgraph, name='companystatuspaymentgraph'),\n path('companystatuspaymentgraphtime/', views.companystatuspaymentgraphtime, name='companystatuspaymentgraphtime'),\n path('company_graph/', views.company_graph, name='company_graph'),\n path('company_graph_time_filter/', views.company_graph_time_filter, name='company_graph_time_filter'),\n 
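    # [Editor's note] The routes above and below rely on Django path converters:
    # a segment such as '<int:u_id>' captures an integer and passes it to the
    # view as a keyword argument. A hypothetical illustration (not a real route):
    #     path('candidate/<int:u_id>/graph/', views.candidate_graph_time_filter, name='candidate_graph'),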
path('candidate_graph_time_filter/', views.candidate_graph_time_filter, name='candidate_graph_time_filter'),\n path('messages_graph/', views.messages_graph, name='messages_graph'),\n\n # emails\n path('replytorandommessagesviaemail/', views.reply_to_random_messages_via_email, name='reply_to_random_messages_via_email'),\n path('replyemail/', views.replyemail, name='replyemail'),\n\n\n # whatweoffer\n path('allwhatweoffer/',views.whatweoffer, name='whatweoffer'),\n path('addwhatweoffer/',views.addwhatweoffer, name='addwhatweoffer'),\n path('editwhatweoffer/',views.editwhatweoffer, name='editwhatweoffer'),\n path('deletewhatweoffer/',views.deletewhatweoffer, name='deletewhatweoffer'),\n\n\n # changestatus\n path('changecandidatestatustonewbee//', views.changecandidatestatustonewbee, name='changecandidatestatustonewbee'),\n path('changecandidatestatustoregi//', views.changecandidatestatustoregi, name='changecandidatestatustoregi'),\n path('changecandidatestatustodeac//', views.changecandidatestatustodeac, name='changecandidatestatustodeac'),\n\n # changestatus\n path('changecompanystatustonewbee//', views.changecompanystatustonewbee, name='changecompanytatustonewbee'),\n path('changecompanystatustoregi//', views.changecompanystatustoregi, name='changecompanystatustoregi'),\n path('changecompanystatustodeac//', views.changecompanystatustodeac, name='changecompanystatustodeac'),\n\n # candidatepricing\n path('allcandidateregpricing/', views.candidateregpricing, name='candidateregpricing'),\n path('allcandidateaddregpricing/', views.candidateaddregpricing, name='candidateaddregpricing'),\n path('allcandidateupdateregpricing//', views.candidateupdateregpricing, name='candidateupdateregpricing'),\n path('allcandidatedeleteregpricing//', views.candidatedeleteregpricing, name='candidatedeleteregpricing'),\n path('allcandidatestatusregpricing//', views.candidatestatusregpricing, name='candidatestatusregpricing'),\n\n # compdidatepricing\n path('allcompanyregpricing/', views.companyregpricing, name='companyregpricing'),\n path('allcompanyaddregpricing/', views.companyaddregpricing, name='companyaddregpricing'),\n path('allcompanyupdateregpricing//', views.companyupdateregpricing, name='companyupdateregpricing'),\n path('allcompanydeleteregpricing//', views.companydeleteregpricing, name='companydeleteregpricing'),\n path('allcompanystatusregpricing//', views.companystatusregpricing, name='companystatusregpricing'),\n\n path('analytics/', views.analytics, name='analytics'),\n path('admin/change_password', views.admin_change_password, name='admin_change_password'),\n path('admin/edit_account', views.admin_edit_account, name='admin_edit_account'),\n\n path('county/', views.county, name='county'),\n path('region/', views.region, name='region'),\n\n path('compayments/', views.payments, name='companypayments'),\n path('candpayments/', views.candpayments, name='candidatepayments'),\n]","repo_name":"eugenewere/new_lml","sub_path":"lmlappadmin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23449049517","text":"import xarray as xa, numpy as np\nfrom typing import List, Union, Tuple, Optional, Dict, Type\nfrom matplotlib.image import AxesImage\nfrom matplotlib.widgets import Slider, Button\nfrom matplotlib.axes import Axes\nimport matplotlib.pyplot as plt\n\nclass ImageBrowser:\n\n def __init__(self, label: str, ax: Axes, images: List[xa.DataArray], plot_args: Dict, **kwargs ):\n 
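        # [Editor's sketch] The widget wiring used throughout this class follows
        # matplotlib's callback contract; a minimal stand-alone illustration
        # (hypothetical axes objects, not part of the original file):
        #     from matplotlib.widgets import Slider
        #     slider = Slider(ax=slider_axes, label='step', valmin=0, valmax=9, valstep=1)
        #     slider.on_changed(lambda val: print(val))   # called with the new value
        #     button.on_clicked(lambda event: None)       # Button passes a MouseEvent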
self.ax: Axes = ax\n self.label = label\n self.name = plot_args.pop('name','')\n self.images: List[xa.DataArray] = images\n self.overlay_plots: List[Tuple[str,AxesImage]] = []\n self.overlay_index = 0\n self.build( plot_args )\n\n def norm(self, x: np.ndarray ) -> np.ndarray:\n xmin, xmax = np.nanmin(x), np.nanmax(x)\n return (x-xmin)/(xmax-xmin)\n\n def build(self, plot_args: Dict):\n cmap = plot_args.pop('cmap', \"jet\")\n overlay_args = dict( overlays= plot_args.pop( 'overlays', {} ),\n overlay_alpha = plot_args.pop( 'overlay_alpha', 0.5 ) )\n idata = self.image_data(0)\n self.plot: AxesImage = self.ax.imshow( idata, cmap=cmap, origin=\"lower\", **plot_args )\n self.build_slider()\n self.build_overlay( **overlay_args )\n\n def build_slider(self):\n self.ax.figure.subplots_adjust( bottom=0.25 )\n sax = self.ax.figure.add_axes([0.1, 0.1, 0.65, 0.03])\n svm = len( self.images )-1\n self.slider = Slider( ax=sax, label=self.label, valmin=0, valmax=svm, valinit=0, valstep=1, dragging=True )\n self.slider.on_changed(self.update)\n\n def build_overlay(self, **kwargs ):\n self.overlays: Dict[str,xa.DataArray] = kwargs.get('overlays', {})\n bax = self.ax.figure.add_axes([0.8, 0.1, 0.15, 0.03])\n self.overlay_alpha = kwargs.get('overlay_alpha',0.7)\n for oname,overlay in self.overlays.items():\n overlay_data = self.norm( overlay[0].values )\n plot = self.ax.imshow(overlay_data, cmap='binary', origin=\"lower\", alpha=0.0)\n self.overlay_plots.append( (oname,plot) )\n self.ax.set_title(self.name)\n self.overlay_button = Button( bax, 'Overlay', hovercolor='0.975' )\n self.overlay_button.on_clicked(self.toggle_overlay)\n\n def toggle_overlay( self, *args ):\n omod = len(self.overlay_plots) + 1\n print(f'toggle_overlay {omod}')\n self.overlay_index = (self.overlay_index + 1) % omod\n for idx, ( oname, overlay_plot ) in enumerate(self.overlay_plots):\n overlay_plot.set_alpha( self.overlay_alpha if (idx+1==self.overlay_index) else 0.0 )\n print(f'set_alpha: {self.overlay_index}')\n self.ax.set_title( self.name if self.overlay_index == 0 else self.overlay_plots[self.overlay_index-1][0] )\n print('set_title')\n self.ax.figure.canvas.draw_idle()\n\n def image_data(self, step: int ) -> np.ndarray:\n return self.norm( self.images[step].values )\n\n def update(self, step ):\n try:\n self.plot.set_data( self.image_data(step) )\n # for ixd,cmap,overlays in enumerate(self.overlays.items()):\n # if len(overlays) > 1:\n # self.overlay_plot[1].set_data( overlays[step] )\n self.ax.figure.canvas.draw_idle()\n except Exception as err:\n print( err )\n\n\n","repo_name":"nasa-nccs-cds/FoundationModelBase","sub_path":"fmbase/plot/scrap/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6923475755","text":"class Solution(object):\n def searchRange(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n start = 0\n end = len(nums) - 1\n idx = 0\n output = [-1, -1]\n if len(nums) == 1 and nums[0] == target:\n return [0, 0]\n while start <= end:\n mid = start + (end - start) // 2\n if target < nums[mid]:\n end = mid - 1\n if target > nums[mid]:\n start = mid + 1\n if target == nums[mid]:\n output[idx] = mid\n idx = 1\n if mid == start:\n start = mid + 1\n elif target < nums[mid + 1]:\n end = mid - 1\n elif target > nums[mid - 1]:\n start = mid + 1\n output.sort()\n test = output[0] * output[1]\n if -1 in output:\n return [output[1], 
output[1]]\n\n return output\n\nif __name__ == '__main__':\n result1 = Solution().searchRange([1,3], 1)\n print (result1)","repo_name":"watermeloniiiii/leetcode","sub_path":"Array/Q34.py","file_name":"Q34.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13826717845","text":"var1 = \"Hello World!\"\nvar2 = \"Python Programming\"\n\n# 1.print 'H' from var1\n\n# 2.print \"ython\" from var2\n\n\n\ns = \"Hello\"\n\n# 1. print 'ell'\n# 2. print 'lo'\n# 3. print 'el'\n\n\n\nlanguage = \"Python\"\n\n# 1. print 'P'\n# 2. print 'o'\n# 3. print 'Python'\n# 4. print 'yt'\n# 5. print 'n'\n# 6. print 'thon'\n\n\nspell1 = \"ABCDEFG\"\nspell2 = \"HIJKLMN\"\nspell3 = \"OPQRSTU\"\nspell4 = \"VWXYZ\"\n# print your name by accessing values in strings","repo_name":"Juhkim90/L18-Access-Values-in-String","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74885226826","text":"import io\nimport joblib\nfrom PIL import Image\nfrom flask import Flask\nfrom flask_restplus import Api, Resource, fields, abort, inputs\nfrom werkzeug.datastructures import FileStorage\nfrom face_recognition import preprocessing\n\nface_recogniser = joblib.load('model/face_recogniser.pkl')\npreprocess = preprocessing.ExifOrientationNormalize()\n\nIMAGE_KEY = 'image'\nINCLUDE_PREDICTIONS_KEY = 'include_predictions'\napp = Flask(__name__)\napi = Api(app, version='0.1.0', title='Face Recognition API', doc='/docs')\n\nparser = api.parser()\nparser.add_argument(IMAGE_KEY, type=FileStorage, location='files', required=True,\n help='Image on which face recognition will be run.')\nparser.add_argument(INCLUDE_PREDICTIONS_KEY, type=inputs.boolean, default=False,\n help='Whether to include all predictions in response.')\n\nbounding_box = api.model('BoundingBox', {\n 'left': fields.Float,\n 'top': fields.Float,\n 'right': fields.Float,\n 'bottom': fields.Float,\n})\n\nprediction = api.model('Prediction', {\n 'label': fields.String,\n 'confidence': fields.Float\n})\n\nface_model = api.model('Face', {\n 'top_prediction': fields.Nested(prediction),\n 'bounding_box': fields.Nested(bounding_box),\n 'all_predictions': fields.List(fields.Nested(prediction))\n})\n\nresponse_model = api.model('Response', {\n 'faces': fields.List(fields.Nested(face_model))\n})\n\nerror_model = api.model('ErrorResponse', {\n 'message': fields.String\n})\n\n\n@api.route('/face-recognition')\nclass FaceRecognition(Resource):\n @api.expect(parser, validate=True)\n @api.marshal_with(response_model)\n @api.response(200, 'Success')\n @api.response(400, 'No image file in request.', error_model)\n def post(self):\n args = parser.parse_args()\n if IMAGE_KEY not in args:\n abort(400, \"Image field '{}' doesn't exist in request!\".format(IMAGE_KEY))\n\n img = Image.open(io.BytesIO(args[IMAGE_KEY].read()))\n img = preprocess(img)\n # convert image to RGB (stripping alpha channel if exists)\n img = img.convert('RGB')\n faces = face_recogniser(img)\n return \\\n {\n 'faces': [\n {\n 'top_prediction': face.top_prediction._asdict(),\n 'bounding_box': face.bb._asdict(),\n 'all_predictions': [p._asdict() for p in face.all_predictions] if\n args[INCLUDE_PREDICTIONS_KEY] else None\n }\n for face in faces\n ]\n }\n\n\nif __name__ == '__main__':\n 
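    # [Editor's note] host='0.0.0.0' makes the Flask development server listen on
    # all interfaces; the debug reloader stays off by default. A hypothetical
    # localhost-only variant for local testing:
    #     app.run(host='127.0.0.1', port=5000, debug=False)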
app.run(host='0.0.0.0')\n","repo_name":"arsfutura/face-recognition","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"81"} +{"seq_id":"70314894346","text":"import datetime\nimport pytz\nfrom _main_.utils.common import encode_data_for_URL, serialize_all\nfrom _main_.utils.constants import COMMUNITY_URL_ROOT\nfrom _main_.utils.emailer.send_email import send_massenergize_email_with_attachments\nfrom api.constants import GUEST_USER\nfrom api.utils.constants import USER_EVENTS_NUDGE_TEMPLATE\nfrom database.models import Community, CommunityMember, Event, UserProfile, FeatureFlag\nfrom django.db.models import Q\nfrom dateutil.relativedelta import relativedelta\nfrom database.utils.common import get_json_if_not_none\n\nfrom database.utils.settings.model_constants.events import EventConstants\nfrom django.utils import timezone\n\nWEEKLY = \"per_week\"\nBI_WEEKLY = \"biweekly\"\nMONTHLY = \"per_month\"\nDAILY=\"per_day\"\n\n\n\neastern_tz = pytz.timezone(\"US/Eastern\")\n\nLIMIT=5\n\nUSER_PREFERENCE_DEFAULTS = {\n \"communication_prefs\": {\n \"update_frequency\": {\"per_week\": {\"value\": True}},\n \"news_letter\": {\"as_posted\": {\"value\": True}},\n \"messaging\": {\"yes\": {\"value\": True}},\n },\n \"notifications\": {\n \"upcoming_events\": {\"never\": {\"value\": True}},\n \"upcoming_actions\": {\"never\": {\"value\": True}},\n \"news_teams\": {\"never\": {\"value\": True}},\n \"new_testimonials\": {\"never\": {\"value\": True}},\n \"your_activity_updates\": {\"never\": {\"value\": True}},\n },\n }\n\n\n\nUSER_EVENT_NUDGE_KEY = \"user-event-nudge-feature-flag\"\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n\ndef should_user_get_nudged(user):\n user_preferences = user.preferences if user.preferences else {}\n portal_preferences = user_preferences.get(\"user_portal_settings\", USER_PREFERENCE_DEFAULTS)\n\n user_communication_preferences = portal_preferences.get(\"communication_prefs\", {})\n freq = user_communication_preferences.get(\"update_frequency\", {})\n notification_dates = user.notification_dates\n last_notified = notification_dates.get(\"user_event_nudge\", \"\") if notification_dates else None\n if last_notified:\n freq_keys = freq.keys()\n\n if len(freq_keys) == 0 or WEEKLY in freq_keys:\n in_a_week_from_last_nudge = datetime.datetime.strptime(last_notified, '%Y-%m-%d') + relativedelta(weeks=1)\n if in_a_week_from_last_nudge.date() <= datetime.date.today():\n return True\n\n if BI_WEEKLY in freq_keys:\n in_two_weeks_from_last_nudge = datetime.datetime.strptime(\n last_notified, '%Y-%m-%d') + relativedelta(weeks=2)\n if in_two_weeks_from_last_nudge.date() <= datetime.date.today():\n return True\n\n if MONTHLY in freq_keys:\n in_a_month_from_last_nudge = datetime.datetime.strptime(\n last_notified, '%Y-%m-%d') + relativedelta(months=1)\n if in_a_month_from_last_nudge.date() <= datetime.date.today():\n return True\n \n if DAILY in freq_keys:\n in_a_day_from_last_nudge = datetime.datetime.strptime(\n last_notified, '%Y-%m-%d') + relativedelta(days=1)\n if in_a_day_from_last_nudge.date() <= datetime.date.today():\n return True\n\n else:\n return True\n \n\n\n\ndef get_email_lists(users):\n emails = []\n for user in users:\n if should_user_get_nudged(user):\n emails.append(user.email)\n\n return emails\n \n\ndef update_last_notification_dates(email):\n new_date = 
str(datetime.date.today())\n user = UserProfile.objects.filter(email=email).first()\n user_notification_dates = user.notification_dates if user.notification_dates else {}\n\n notification_dates = {**user_notification_dates,\"user_event_nudge\":new_date}\n UserProfile.objects.filter(email=email).update(**{\"notification_dates\":notification_dates })\n\n\ndef get_community_events(community_id):\n events = Event.objects.filter(\n Q(community__id=community_id) | # parent community events\n Q(shared_to__id=community_id), # events shared to community\n is_published=True, \n is_deleted=False, \n start_date_and_time__gte=timezone.now(),\n ).distinct()\n \n return events\n\n\ndef get_community_users(community_id):\n community_members = CommunityMember.objects.filter(community__id=community_id, is_deleted=False).values_list(\"user\", flat=True)\n users = UserProfile.objects.filter(id__in=community_members)\n return users\n\ndef generate_change_pref_url(subdomain,email, login_method):\n encoded = encode_data_for_URL({\"email\": email,\"login_method\": login_method,})\n url = f\"{COMMUNITY_URL_ROOT}/{subdomain}/profile/settings/?cred={encoded}\"\n return url\n\ndef get_logo(event):\n if event.get(\"image\"):\n return event.get(\"image\").get(\"url\")\n elif event.get(\"community\", {}).get(\"logo\"):\n return event.get(\"community\").get(\"logo\").get(\"url\")\n return \"\"\n\n\ndef convert_date(date, format):\n return date.astimezone(eastern_tz).strftime(format)\n\ndef get_date_range(start, end):\n start_date =start.strftime('%b-%d-%Y')\n end_date = end.strftime('%b-%d-%Y')\n if start_date == end_date:\n return f\"{convert_date(start,'%b %d, %Y')}, {convert_date(start,' %H:%M %p')} - {convert_date(end,' %H:%M %p')}\"\n return f\"{convert_date(start,'%b %d, %Y %H:%M %p')} - {convert_date(end,'%b %d, %Y %H:%M %p')}\"\n\n\ndef truncate_title(title):\n if len(title) > 50:\n return title[:50] + \"...\"\n return title\n \ndef prepare_events_email_data(events):\n events = serialize_all(events, full=True)\n\n data = [{\n \"logo\": get_logo(event),\n \"title\": truncate_title(event.get(\"name\")),\n \"date\": get_date_range(event.get(\"start_date_and_time\"), event.get(\"end_date_and_time\")),\n \"location\": \"In person\" if event.get(\"location\") else \"Online\",\n \"view_link\": f'{COMMUNITY_URL_ROOT}/{event.get(\"community\", {}).get(\"subdomain\")}/events/{event.get(\"id\")}',\n } for event in events]\n #sort list of events by date\n data = (sorted(data, key=lambda i: i['date'], reverse=True))\n return data\n\n\ndef send_events_report_email(name, email, event_list, comm, login_method=\"\"):\n try:\n events = prepare_events_email_data(event_list[:LIMIT])\n has_more_events = len(event_list) > LIMIT\n change_pref_link = generate_change_pref_url(comm.subdomain,email,login_method)\n data = {}\n data[\"name\"] = name.split(\" \")[0]\n data[\"change_preference_link\"] = change_pref_link\n data[\"events\"] = events\n data[\"has_more_events\"] = {\n \"view_more_link\": f'{COMMUNITY_URL_ROOT}/{comm.subdomain}/events?ids={\"-\".join([str(event.id) for event in event_list[LIMIT:]])}',\n } if has_more_events else None \n\n data[\"community_logo\"] = get_json_if_not_none(comm.logo).get(\"url\") if comm.logo else \"\"\n data[\"cadmin_email\"]=comm.owner_email if comm.owner_email else \"\"\n data[\"community\"] = comm.name\n send_massenergize_email_with_attachments(USER_EVENTS_NUDGE_TEMPLATE, data, [email], None, None)\n return True\n except Exception as e:\n print(\"send_events_report exception: \" + str(e))\n return 
False\n\n\ndef send_automated_nudge(events, user, community):\n if len(events) > 0 and user:\n name = user.full_name\n email = user.email\n login_method = (user.user_info or {}).get(\"login_method\") or \"\"\n if not name or not email:\n print(\"Missing name or email for user: \" + str(user))\n return False\n user_is_ready_for_nudge = should_user_get_nudged(user)\n\n if user_is_ready_for_nudge:\n print(\"sending nudge to \" + email)\n is_sent = send_events_report_email(name, email, events, community,login_method)\n if not is_sent:\n print( f\"**** Failed to send email to {name} for community {community.name} ****\")\n return False\n update_last_notification_dates(email)\n return True\n\n\ndef send_user_requested_nudge(events, user, community):\n if len(events) > 0 and user:\n name = user.full_name\n email = user.email\n login_method = (user.user_info or {}).get(\"login_method\") or \"\"\n is_sent = send_events_report_email(name, email, events, community, login_method)\n if not is_sent:\n print(f\"**** Failed to send email to {name} for community {community.name} ****\")\n return False\n return True\n \n\n\ndef get_user_events(notification_dates, community_events):\n today = timezone.now()\n a_week_ago = today - relativedelta(weeks=1)\n a_month_ago = today - relativedelta(months=1)\n date_aware =None\n if not notification_dates:\n return community_events.filter(Q(published_at__range=[a_month_ago, today]))\n \n user_event_nudge = notification_dates.get(\"user_event_nudge\", None)\n if user_event_nudge:\n last_received_at = datetime.datetime.strptime(user_event_nudge, '%Y-%m-%d')\n date_aware = timezone.make_aware(last_received_at, timezone=timezone.get_default_timezone())\n \n # if user hasn't received a nudge before, get all events that went live within the week\n # else use the last nudge date\n last_time = date_aware if date_aware else a_week_ago\n\n return community_events.filter(Q(published_at__range=[last_time, today]))\n\n\n'''\nNote: This function only get email as argument when the\nnudge is requested on demand by a cadmin on user portal.\n'''\n# Entry point\ndef prepare_user_events_nudge(email=None, community_id=None):\n try:\n if email and community_id:\n all_community_events = get_community_events(community_id)\n user = UserProfile.objects.filter(email=email).first()\n community = Community.objects.filter(id=community_id).first()\n events = get_user_events(user.notification_dates, all_community_events)\n send_user_requested_nudge(events, user, community)\n\n return True\n \n flag = FeatureFlag.objects.get(key=USER_EVENT_NUDGE_KEY)\n if not flag or not flag.enabled():\n return False\n \n communities = Community.objects.filter(is_published=True, is_deleted=False)\n communities = flag.enabled_communities(communities)\n for community in communities:\n events = get_community_events(community.id)\n users = get_community_users(community.id)\n users = flag.enabled_users(users)\n\n for user in users: \n user_events = get_user_events(user.notification_dates, events)\n send_automated_nudge(user_events, user, community)\n \n return True \n except Exception as e:\n print(\"Community member nudge exception: \" + str(e))\n return False\n ","repo_name":"massenergize/api","sub_path":"src/task_queue/events_nudge/user_event_nudge.py","file_name":"user_event_nudge.py","file_ext":"py","file_size_in_byte":10838,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"28819239255","text":"class LRUCache:\n\n def __init__(self, capacity: int):\n 
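        # [Editor's sketch] The dict-plus-counter bookkeeping below evicts in
        # O(n) because min() scans self.n on every eviction. A standard O(1)
        # alternative (assuming Python 3+, not part of the original submission)
        # keys recency in a collections.OrderedDict:
        #
        #     from collections import OrderedDict
        #
        #     class LRUCacheOD:
        #         def __init__(self, capacity: int):
        #             self.capacity = capacity
        #             self.cache = OrderedDict()
        #
        #         def get(self, key: int) -> int:
        #             if key not in self.cache:
        #                 return -1
        #             self.cache.move_to_end(key)  # mark as most recently used
        #             return self.cache[key]
        #
        #         def put(self, key: int, value: int) -> None:
        #             if key in self.cache:
        #                 self.cache.move_to_end(key)
        #             self.cache[key] = value
        #             if len(self.cache) > self.capacity:
        #                 self.cache.popitem(last=False)  # evict least recently used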
self.capacity = capacity\n self.m = {}\n self.n = {}\n self.activity = 0\n \n \n\n def get(self, key: int) -> int:\n \n if key in self.m:\n self.activity +=1\n self.n[key] = self.activity\n return self.m[key]\n else:\n return -1\n \n\n def put(self, key: int, value: int) -> None:\n \n if key in self.m:\n self.activity +=1\n self.n[key] = self.activity\n self.m[key]= value\n else:\n if len(self.m)< self.capacity:\n self.activity +=1\n self.n[key] = self.activity\n self.m[key]= value\n \n else:\n self.activity +=1\n self.n[key] = self.activity\n self.m.pop(min(self.n, key=self.n.get))\n self.n.pop(min(self.n, key=self.n.get))\n self.m[key] = value\n \n \n \n \n \n \n\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)","repo_name":"mechtotech/leetcode","sub_path":"146-lru-cache/146-lru-cache.py","file_name":"146-lru-cache.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73504574344","text":"import random\n\n\n# Классы исключений\nclass BoardOutException(Exception): # координаты мимо поля\n pass\n\n\nclass ReshootException(Exception): # выстрел в точку в которую уже стреляли\n pass\n\n\nclass ShipAssignment(Exception): # не удалось расставить корабли на доску\n pass\n\n\n# Класс точек\nclass Dot:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __eq__(self, other):\n \"\"\"\n Метод проверяет равенство точек\n \"\"\"\n if isinstance(other, Dot):\n return self.x == other.x and self.y == other.y\n return False\n\n\n# Клас кораблей\nclass Ship:\n def __init__(self, length, start_point, horizontal):\n self.length = length # int 1-3\n self.start_point = start_point # нос корабля (1, 1)\n self.horizontal = horizontal # если горизонтально то True, нет False\n self.health = length # кол-во жизней, изначально равно длине корабля\n\n def dots(self):\n \"\"\"\n Метод возвращает список всех точек корабля.\n \"\"\"\n dots = []\n for i in range(self.length):\n if self.horizontal:\n dot = Dot(self.start_point.x, self.start_point.y + i)\n else:\n dot = Dot(self.start_point.x + i, self.start_point.y)\n dots.append(dot)\n return dots\n\n\nclass Board:\n def __init__(self, size=6, hid=False):\n self.board = [[\"◯\"] * size for _ in range(size)]\n self.list_ships = []\n self.size = size\n self.hid = hid\n self.live_ships = 0\n self.all_contour = []\n\n def add_ship(self, ship):\n \"\"\"\n Метод add_ship, который ставит корабль на доску (если ставить не получается, выбрасываем исключения)\n \"\"\"\n for dot in ship.dots():\n if self.out(dot) or dot in self.all_contour or self.board[dot.x][dot.y] == \"■\":\n raise ShipAssignment(\"Не удалось поставить корабль на доску\")\n\n for dot in ship.dots():\n self.board[dot.x][dot.y] = \"■\"\n\n self.list_ships.append(ship)\n self.live_ships += 1\n self.all_contour.extend(self.contour(ship))\n\n def contour(self, ship):\n \"\"\"\n Метод contour, который обводит корабль по контуру. 
Он будет полезен и в ходе самой игры,\n и в при расстановке кораблей (помечает соседние точки, где корабля по правилам быть не может).\n \"\"\"\n contour = []\n for dot in ship.dots():\n for i in range(dot.x - 1, dot.x + 2):\n for j in range(dot.y - 1, dot.y + 2):\n if self.out(Dot(i, j)) or Dot(i, j) in ship.dots():\n continue\n\n contour.append(Dot(i, j))\n return contour\n\n def show_board(self):\n \"\"\"\n Метод, который выводит доску в консоль в з��висимости от параметра hid\n \"\"\"\n print(\" | \" + \" | \".join(str(i + 1) for i in range(self.size)) + \" |\")\n for i in range(self.size):\n row = \"\"\n if i < self.size:\n row += \" \"\n row += str(i + 1) + \" | \"\n for j in range(self.size):\n if self.hid and self.board[i][j] == \"■\":\n row += \"◯ | \"\n else:\n row += f\"{self.board[i][j]} | \"\n print(row)\n\n def out(self, dot):\n \"\"\"\n Метод out, который для точки (объекта класса Dot) возвращает True,\n если точка выходит за пределы поля, и False, если не выходит.\n \"\"\"\n return not ((0 <= dot.x < self.size) and (0 <= dot.y < self.size))\n\n def shot(self, dot):\n \"\"\"\n Метод shot, который делает выстрел по доске (если есть попытка выстрелить за пределы и в использованную точку,\n нужно выбрасывать исключения).\n \"\"\"\n if self.out(dot):\n raise BoardOutException(\"Стреляете мимо доски!\")\n\n if self.board[dot.x][dot.y] in ['T', 'X']:\n raise ReshootException(\"Вы уже стреляли сюда!\")\n\n if self.board[dot.x][dot.y] == '◯':\n self.board[dot.x][dot.y] = \"T\"\n print('Мимо')\n return False\n\n self.board[dot.x][dot.y] = \"X\"\n for ship in self.list_ships:\n if dot in ship.dots():\n ship.health -= 1\n print('Ранил')\n if ship.health == 0:\n self.live_ships -= 1\n print('Убил')\n for d in self.contour(ship):\n self.board[d.x][d.y] = 'T'\n return True\n\n\nclass Player:\n def __init__(self, own_board, enemy_board):\n self.own_board = own_board\n self.enemy_board = enemy_board\n\n def ask(self):\n \"\"\"\n Метод, который «спрашивает» игрока, в какую клетку он делает выстрел.\n Пока мы делаем общий для AI и пользователя класс, этот метод мы описать не можем.\n Оставим этот метод пустым. Тем самым обозначим, что потомки должны реализовать этот метод.\n \"\"\"\n pass\n\n def move(self):\n \"\"\"\n Метод, который делает ход в игре.\n Тут мы вызываем метод ask, делаем выстрел по вражеской доске (метод Board.shot),\n отлавливаем исключения, и если они есть, пытаемся повторить ход. 
Метод должен возвращать True,\n если этому игроку нужен повторный ход (например если он выстрелом подбил корабль).\n \"\"\"\n while True:\n try:\n shot = self.enemy_board.shot(self.ask())\n return shot\n except (BoardOutException, ReshootException) as e:\n print(e)\n\n\nclass AI(Player):\n def ask(self):\n x = random.randint(0, self.own_board.size - 1)\n y = random.randint(0, self.own_board.size - 1)\n return Dot(x, y)\n\n\nclass User(Player):\n def ask(self):\n while True:\n cords = input(\"Ваш ход: \").split()\n\n if len(cords) != 2:\n print(\"Нужна 2 числа\")\n continue\n\n x, y = cords\n\n if not (x.isdigit() and y.isdigit()):\n print(\"Только цифры\")\n continue\n\n x, y = int(x), int(y)\n\n return Dot(x - 1, y - 1)\n\n\nclass Game:\n def __init__(self):\n self.board_ai = self.random_board()\n self.board_user = self.random_board()\n self.board_ai.hid = True\n self.player_user = User(self.board_user, self.board_ai)\n self.player_ai = AI(self.board_ai, self.board_user)\n\n def random_board(self):\n board = Board()\n ships = [(1, 3), (2, 2), (4, 1)]\n for ship in ships:\n\n for _ in range(ship[0]):\n temp = 0\n while temp < 1000:\n try:\n start_point = Dot(random.randint(0, board.size), random.randint(0, board.size))\n sh = Ship(ship[1], start_point, random.randint(0, 1))\n board.add_ship(sh)\n except ShipAssignment:\n temp += 1\n else:\n break\n\n while board.live_ships != 7:\n self.random_board()\n return board\n\n def greet(self):\n print(\"Добро пожаловать в игру Морской бой!\")\n print(\"Координаты задаются в формате 'x y', где x - цифра (1-6), y - цифра (1-6)\")\n print(\"Например, '1 1' - координаты левой верхней клетки игрового поля\")\n\n def loop(self):\n num = 0\n while True:\n print(\"-\" * 25)\n print(\"Доска игрока:\")\n print(\"-\" * 25)\n self.board_user.show_board()\n print(\"-\" * 25)\n print(\"Доска врага:\")\n print(\"-\" * 25)\n self.board_ai.show_board()\n print(\"-\" * 25)\n\n if num % 2 == 0:\n print(\"Ходит пользователь!\")\n repeat = self.player_user.move()\n else:\n print(\"Ходит компьютер!\")\n repeat = self.player_ai.move()\n if repeat:\n num -= 1\n\n if self.board_ai.live_ships == 0:\n print(\"Вы победили!\")\n break\n\n if self.board_user.live_ships == 0:\n print(\"Компьютер победил!\")\n break\n\n num += 1\n\n def start(self):\n self.greet()\n self.loop()\n\n\nif __name__ == '__main__':\n g = Game()\n g.start()\n","repo_name":"kokojoza/AlmostANavalBattle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9867,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42687445311","text":"# 因为有\"( [ ) ]\"案例的存在,我能想到的是第一个思路是从中间开始判断,因为括号都是成对出现的。\n# 但后来想想不太好实现,又考虑了一下规律:\n# 1. 左括号后面必须跟对应的右括号或者其他类型的左括号\n# 2. 
可以用先进后出栈来实现,遇上对应的括号对就弹出去\n\n\nclass Solution:\n def isValid(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n brRight = {')': '(',\n ']': '[',\n '}': '{'}\n pair = []\n if len(s) == 0: # 空集成立\n return True\n if len(s) % 2 != 0: # valid必然为偶数长度\n return False\n for br in s:\n pair.append(br)\n if len(pair) < 2:\n continue\n # 碰到右括号,就判定前一个字符是否是对应的左括号\n if pair[-1] in brRight:\n if pair[-2] == brRight[pair[-1]]:\n pair.pop()\n pair.pop()\n continue\n else:\n return False\n else:\n continue\n\n if len(pair) == 0:\n return True\n else:\n return False\n","repo_name":"zsyc/LeetAlgo","sub_path":"0020_ValidParent.py","file_name":"0020_ValidParent.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7990762845","text":"import json\nimport os\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\nfrom typing import List\n\nimport numpy as np\nimport torch\nfrom scipy.stats import pearsonr, spearmanr\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom utils.dataset_util import selection_collate_fn\n\n\n@dataclass\nclass EvaluationExample:\n history: List[str]\n answer: str\n response: str\n score: float\n modelname: str = None\n fact: List[str] = None\n\n \ndef save_prediction_output(output_fname: str, examples: List, predictions: List[float]):\n SAVEKEY = [\"history\", \"answer\", \"response\", \"score\", \"modelname\", \"fact\"]\n os.makedirs(os.path.dirname(output_fname), exist_ok=True)\n with open(output_fname, \"w\") as f:\n assert len(examples) == len(predictions)\n for i, e in enumerate(examples):\n e = {k: v for k, v in asdict(e).items() if k in SAVEKEY}\n e[\"pred_score\"] = float(predictions[i])\n f.write(json.dumps(e) + \"\\n\")\n\n\ndef get_conv_repr(args, dataset, settype: str, tokenizer, reranker) -> np.ndarray:\n num_candidate = args.num_negative + 1 if settype == \"original\" else 2 # response and answer\n\n partial_selection_collate_fn = partial(selection_collate_fn, pad_id=tokenizer.pad_token_id)\n loader = DataLoader(\n dataset,\n shuffle=False,\n batch_size=args.eval_batch_size,\n collate_fn=partial_selection_collate_fn,\n drop_last=False,\n )\n hidden_repr_list = []\n for idx, batch in enumerate(tqdm(loader)):\n ids, mask = (e.to(\"cuda\", non_blocking=True) for e in batch)\n bs = int(len(ids) / num_candidate)\n\n # Ignore the negative from the second\n ids = ids.reshape(bs, num_candidate, -1)[:, 0]\n mask = mask.reshape(bs, num_candidate, -1)[:, 0]\n\n with torch.no_grad():\n _, hidden = reranker(ids, mask, return_hidden=True)\n\n hidden = hidden.reshape(bs, args.hidden_repr_dim).cpu().numpy()\n hidden_repr_list.append(hidden)\n\n reprs = np.concatenate(hidden_repr_list, 0) # |dataset| X model_dim\n return reprs\n\n\ndef get_correlation(humanscores: List[float], modelscores: List[float]):\n assert len(humanscores) == len(modelscores)\n pearson = pearsonr(humanscores, modelscores)\n spearman = spearmanr(humanscores, modelscores)\n\n item = {\n \"pearson-value\": pearson[0],\n \"pearson-p\": pearson[1],\n \"spearman-value\": spearman[0],\n \"spearman-p\": spearman[1],\n }\n return item\n","repo_name":"ddehun/DEnsity","sub_path":"evaluators/eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"21519044536","text":"import pickle\nimport os\nfileName='Camera.cfg'\n\ndef write(values, fileName):\n 
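    # [Editor's note] pickle faithfully round-trips Python objects, but unpickling
    # untrusted files can execute arbitrary code. For simple config values a
    # human-readable alternative (hypothetical, not used by this module) is:
    #     import json
    #     with open(filename, 'w') as f:
    #         json.dump(values, f)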
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)\n with open(filename, 'wb') as f:\n pickle.dump(values, f)\n\ndef read(fileName):\n filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\ndef RGBorHSV():\n inp = input('RGB or HSV?')\n inp = inp.lower()\n if inp == 'r':\n return 'RGB'\n elif inp == 'h':\n return 'HSV'\n else:\n return RGBorHSV()\n","repo_name":"GastricFluid/LHS_Robotics_2016-2017","sub_path":"Imaging/CameraConfig.py","file_name":"CameraConfig.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20524180409","text":"#\r\n# This file is part of the PyMeasure package.\r\n#\r\n# Copyright (c) 2013-2023 PyMeasure Developers\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in\r\n# all copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\n# THE SOFTWARE.\r\n#\r\n\r\nfrom pymeasure.instruments.validators import strict_discrete_set, \\\r\n truncated_discrete_set, truncated_range\r\nfrom pymeasure.instruments import Instrument\r\n\r\n\r\nclass SR860(Instrument):\r\n\r\n SENSITIVITIES = [\r\n 1e-9, 2e-9, 5e-9, 10e-9, 20e-9, 50e-9, 100e-9, 200e-9,\r\n 500e-9, 1e-6, 2e-6, 5e-6, 10e-6, 20e-6, 50e-6, 100e-6,\r\n 200e-6, 500e-6, 1e-3, 2e-3, 5e-3, 10e-3, 20e-3,\r\n 50e-3, 100e-3, 200e-3, 500e-3, 1\r\n ]\r\n TIME_CONSTANTS = [\r\n 1e-6, 3e-6, 10e-6, 30e-6, 100e-6, 300e-6, 1e-3, 3e-3, 10e-3,\r\n 30e-3, 100e-3, 300e-3, 1, 3, 10, 30, 100, 300, 1e3,\r\n 3e3, 10e3, 30e3\r\n ]\r\n ON_OFF_VALUES = ['0', '1']\r\n SCREEN_LAYOUT_VALUES = ['0', '1', '2', '3', '4', '5']\r\n EXPANSION_VALUES = ['0', '1', '2,']\r\n CHANNEL_VALUES = ['OCH1', 'OCH2']\r\n OUTPUT_VALUES = ['XY', 'RTH']\r\n INPUT_TIMEBASE = ['AUTO', 'IN']\r\n INPUT_DCMODE = ['COM', 'DIF', 'common', 'difference']\r\n INPUT_REFERENCESOURCE = ['INT', 'EXT', 'DUAL', 'CHOP']\r\n INPUT_REFERENCETRIGGERMODE = ['SIN', 'POS', 'NEG', 'POSTTL', 'NEGTTL']\r\n INPUT_REFERENCEEXTERNALINPUT = ['50OHMS', '1MEG']\r\n INPUT_SIGNAL_INPUT = ['VOLT', 'CURR', 'voltage', 'current']\r\n INPUT_VOLTAGE_MODE = ['A', 'A-B']\r\n INPUT_COUPLING = ['AC', 'DC']\r\n INPUT_SHIELDS = ['Float', 'Ground']\r\n INPUT_RANGE = ['1V', '300M', '100M', '30M', '10M']\r\n INPUT_GAIN = ['1MEG', '100MEG']\r\n INPUT_FILTER = ['Off', 'On']\r\n LIST_PARAMETER = ['i=', '0=Xoutput', '1=Youtput', '2=Routput', 'Thetaoutput', '4=Aux IN1',\r\n '5=Aux IN2', '6=Aux IN3', '7=Aux IN4', '8=Xnoise', '9=Ynoise',\r\n 
'10=AUXOut1', '11=AuxOut2', '12=Phase', '13=Sine Out amplitude',\r\n '14=DCLevel', '15=Int.referenceFreq', '16=Ext.referenceFreq']\r\n LIST_HORIZONTAL_TIME_DIV = ['0=0.5s', '1=1s', '2=2s', '3=5s', '4=10s', '5=30s', '6=1min',\r\n '7=2min', '8=5min', '9=10min', '10=30min', '11=1hour', '12=2hour',\r\n '13=6hour', '14=12hour', '15=1day', '16=2days']\r\n\r\n x = Instrument.measurement("OUTP? 0",\r\n \"\"\" Reads the X value in Volts \"\"\"\r\n )\r\n y = Instrument.measurement("OUTP? 1",\r\n \"\"\" Reads the Y value in Volts \"\"\"\r\n )\r\n magnitude = Instrument.measurement("OUTP? 2",\r\n \"\"\" Reads the magnitude in Volts. \"\"\"\r\n )\r\n theta = Instrument.measurement("OUTP? 3",\r\n \"\"\" Reads the theta value in degrees. \"\"\"\r\n )\r\n phase = Instrument.control(\r\n "PHAS?", "PHAS %0.7f",\r\n \"\"\" A floating point property that represents the lock-in phase\r\n in degrees. This property can be set. \"\"\",\r\n validator=truncated_range,\r\n values=[-360, 360]\r\n )\r\n frequency = Instrument.control(\r\n "FREQ?", "FREQ %0.6e",\r\n \"\"\" A floating point property that represents the lock-in frequency\r\n in Hz. This property can be set. \"\"\",\r\n validator=truncated_range,\r\n values=[0.001, 500000]\r\n )\r\n internalfrequency = Instrument.control(\r\n "FREQINT?", "FREQINT %0.6e",\r\n \"\"\"A floating point property that represents the internal lock-in frequency in Hz.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[0.001, 500000]\r\n )\r\n harmonic = Instrument.control(\r\n "HARM?", "HARM %d",\r\n \"\"\"An integer property that controls the harmonic that is measured.\r\n Allowed values are 1 to 99. Can be set.\"\"\",\r\n validator=strict_discrete_set,\r\n values=range(1, 100)\r\n )\r\n harmonicdual = Instrument.control(\r\n "HARMDUAL?", "HARMDUAL %d",\r\n \"\"\"An integer property that controls the harmonic in dual reference mode that is measured.\r\n Allowed values are 1 to 99. Can be set.\"\"\",\r\n validator=strict_discrete_set,\r\n values=range(1, 100)\r\n )\r\n sine_voltage = Instrument.control(\r\n "SLVL?", "SLVL %0.9e",\r\n \"\"\"A floating point property that represents the reference sine-wave\r\n voltage in Volts. This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[1e-9, 2]\r\n )\r\n\r\n timebase = Instrument.control(\r\n "TBMODE?", "TBMODE %d",\r\n \"\"\"Sets the external 10 MHz timebase to auto(i=0) or internal(i=1).\"\"\",\r\n validator=strict_discrete_set,\r\n values=[0, 1],\r\n map_values=True\r\n )\r\n dcmode = Instrument.control(\r\n "REFM?", "REFM %d",\r\n \"\"\"A string property that represents the sine out dc mode.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_DCMODE),\r\n validator=strict_discrete_set,\r\n values=INPUT_DCMODE,\r\n map_values=True\r\n )\r\n reference_source = Instrument.control(\r\n "RSRC?", "RSRC %d",\r\n \"\"\"A string property that represents the reference source.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_REFERENCESOURCE),\r\n validator=strict_discrete_set,\r\n values=INPUT_REFERENCESOURCE,\r\n map_values=True\r\n )\r\n reference_triggermode = Instrument.control(\r\n "RTRG?", "RTRG %d",\r\n \"\"\"A string property that represents the external reference trigger mode.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_REFERENCETRIGGERMODE),\r\n validator=strict_discrete_set,\r\n values=INPUT_REFERENCETRIGGERMODE,\r\n map_values=True\r\n )\r\n reference_externalinput = Instrument.control(\r\n "REFZ?", "REFZ %d",\r\n \"\"\"A string property that represents the external reference input.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_REFERENCEEXTERNALINPUT),\r\n validator=strict_discrete_set,\r\n values=INPUT_REFERENCEEXTERNALINPUT,\r\n map_values=True\r\n )\r\n input_signal = Instrument.control(\r\n "IVMD?", "IVMD %d",\r\n \"\"\"A string property that represents the signal input.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_SIGNAL_INPUT),\r\n validator=strict_discrete_set,\r\n values=INPUT_SIGNAL_INPUT,\r\n map_values=True\r\n )\r\n input_voltage_mode = Instrument.control(\r\n "ISRC?", "ISRC %d",\r\n \"\"\"A string property that represents the voltage input mode.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_VOLTAGE_MODE),\r\n validator=strict_discrete_set,\r\n values=INPUT_VOLTAGE_MODE,\r\n map_values=True\r\n )\r\n input_coupling = Instrument.control(\r\n "ICPL?", "ICPL %d",\r\n \"\"\"A string property that represents the input coupling.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_COUPLING),\r\n validator=strict_discrete_set,\r\n values=INPUT_COUPLING,\r\n map_values=True\r\n )\r\n input_shields = Instrument.control(\r\n "IGND?", "IGND %d",\r\n \"\"\"A string property that represents the input shield grounding.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_SHIELDS),\r\n validator=strict_discrete_set,\r\n values=INPUT_SHIELDS,\r\n map_values=True\r\n )\r\n input_range = Instrument.control(\r\n "IRNG?", "IRNG %d",\r\n \"\"\"A string property that represents the input range.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_RANGE),\r\n validator=strict_discrete_set,\r\n values=INPUT_RANGE,\r\n map_values=True\r\n )\r\n input_current_gain = Instrument.control(\r\n "ICUR?", "ICUR %d",\r\n \"\"\"A string property that represents the current input gain.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_GAIN),\r\n validator=strict_discrete_set,\r\n values=INPUT_GAIN,\r\n map_values=True\r\n )\r\n sensitivity = Instrument.control(\r\n "SCAL?", "SCAL %d",\r\n \"\"\" A floating point property that controls the sensitivity in Volts,\r\n which can take discrete values from 1 nV to 1 V. Values are truncated\r\n to the next highest level if they are not exact. \"\"\",\r\n validator=truncated_discrete_set,\r\n values=SENSITIVITIES,\r\n map_values=True\r\n )\r\n time_constant = Instrument.control(\r\n "OFLT?", "OFLT %d",\r\n \"\"\" A floating point property that controls the time constant\r\n in seconds, which can take discrete values from 1 microsecond\r\n to 30,000 seconds. Values are truncated to the next highest\r\n level if they are not exact. \"\"\",\r\n validator=truncated_discrete_set,\r\n values=TIME_CONSTANTS,\r\n map_values=True\r\n )\r\n filter_slope = Instrument.control(\r\n "OFSL?", "OFSL %d",\r\n \"\"\"An integer property that sets the filter slope to 6 dB/oct(i=0), 12 dB/oct(i=1),\r\n 18 dB/oct(i=2), 24 dB/oct(i=3).\"\"\",\r\n validator=strict_discrete_set,\r\n values=range(0, 4)\r\n )\r\n filter_synchronous = Instrument.control(\r\n "SYNC?", "SYNC %d",\r\n \"\"\"A string property that represents the synchronous filter.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_FILTER),\r\n validator=strict_discrete_set,\r\n values=INPUT_FILTER,\r\n map_values=True\r\n )\r\n filter_advanced = Instrument.control(\r\n "ADVFILT?", "ADVFILT %d",\r\n \"\"\"A string property that represents the advanced filter.\r\n This property can be set. Allowed values are:{}\"\"\".format(INPUT_FILTER),\r\n validator=strict_discrete_set,\r\n values=INPUT_FILTER,\r\n map_values=True\r\n )\r\n frequencypreset1 = Instrument.control(\r\n "PSTF? 0", "PSTF 0, %0.6e",\r\n \"\"\"A floating point property that represents the preset frequency for the F1 preset button.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[0.001, 500000]\r\n )\r\n frequencypreset2 = Instrument.control(\r\n "PSTF? 1", "PSTF 1, %0.6e",\r\n \"\"\"A floating point property that represents the preset frequency for the F2 preset button.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[0.001, 500000]\r\n )\r\n frequencypreset3 = Instrument.control(\r\n "PSTF? 2", "PSTF 2, %0.6e",\r\n \"\"\"A floating point property that represents the preset frequency for the F3 preset button.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[0.001, 500000]\r\n )\r\n frequencypreset4 = Instrument.control(\r\n "PSTF? 3", "PSTF 3, %0.6e",\r\n \"\"\"A floating point property that represents the preset frequency for the F4 preset button.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[0.001, 500000]\r\n )\r\n sine_amplitudepreset1 = Instrument.control(\r\n "PSTA? 0", "PSTA 0, %0.9e",\r\n \"\"\"Floating point property representing the preset sine out amplitude, for the A1 preset button.\r\n This property can be set.\"\"\", # noqa: E501\r\n validator=truncated_range,\r\n values=[1e-9, 2]\r\n )\r\n sine_amplitudepreset2 = Instrument.control(\r\n "PSTA? 1", "PSTA 1, %0.9e",\r\n \"\"\"Floating point property representing the preset sine out amplitude, for the A2 preset button.\r\n This property can be set.\"\"\", # noqa: E501\r\n validator=truncated_range,\r\n values=[1e-9, 2]\r\n )\r\n sine_amplitudepreset3 = Instrument.control(\r\n "PSTA? 2", "PSTA 2, %0.9e",\r\n \"\"\"Floating point property representing the preset sine out amplitude, for the A3 preset button.\r\n This property can be set.\"\"\", # noqa: E501\r\n validator=truncated_range,\r\n values=[1e-9, 2]\r\n )\r\n sine_amplitudepreset4 = Instrument.control(\r\n "PSTA? 3", "PSTA 3, %0.9e",\r\n \"\"\"Floating point property representing the preset sine out amplitude, for the A4 preset button.\r\n This property can be set.\"\"\", # noqa: E501\r\n validator=truncated_range,\r\n values=[1e-9, 2]\r\n )\r\n sine_dclevelpreset1 = Instrument.control(\r\n "PSTL? 0", "PSTL 0, %0.3e",\r\n \"\"\"A floating point property that represents the preset sine out dc level for the L1 button.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[-5, 5]\r\n )\r\n sine_dclevelpreset2 = Instrument.control(\r\n "PSTL? 1", "PSTL 1, %0.3e",\r\n \"\"\"A floating point property that represents the preset sine out dc level for the L2 button.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[-5, 5]\r\n )\r\n sine_dclevelpreset3 = Instrument.control(\r\n "PSTL? 
2\", \"PSTL 2, %0.3e\",\r\n \"\"\"A floating point property that represents the preset sine out dc level for the L3 button.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[-5, 5]\r\n )\r\n sine_dclevelpreset4 = Instrument.control(\r\n \"PSTL? 3\", \"PSTL3, %0.3e\",\r\n \"\"\"A floating point property that represents the preset sine out dc level for the L4 button.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[-5, 5]\r\n )\r\n\r\n aux_out_1 = Instrument.control(\r\n \"AUXV? 0\", \"AUXV 1, %f\",\r\n \"\"\" A floating point property that controls the output of Aux output 1 in\r\n Volts, taking values between -10.5 V and +10.5 V.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[-10.5, 10.5]\r\n )\r\n # For consistency with other lock-in instrument classes\r\n dac1 = aux_out_1\r\n\r\n aux_out_2 = Instrument.control(\r\n \"AUXV? 1\", \"AUXV 2, %f\",\r\n \"\"\" A floating point property that controls the output of Aux output 2 in\r\n Volts, taking values between -10.5 V and +10.5 V.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[-10.5, 10.5]\r\n )\r\n # For consistency with other lock-in instrument classes\r\n dac2 = aux_out_2\r\n\r\n aux_out_3 = Instrument.control(\r\n \"AUXV? 2\", \"AUXV 3, %f\",\r\n \"\"\" A floating point property that controls the output of Aux output 3 in\r\n Volts, taking values between -10.5 V and +10.5 V.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[-10.5, 10.5]\r\n )\r\n # For consistency with other lock-in instrument classes\r\n dac3 = aux_out_3\r\n\r\n aux_out_4 = Instrument.control(\r\n \"AUXV? 3\", \"AUXV 4, %f\",\r\n \"\"\" A floating point property that controls the output of Aux output 4 in\r\n Volts, taking values between -10.5 V and +10.5 V.\r\n This property can be set.\"\"\",\r\n validator=truncated_range,\r\n values=[-10.5, 10.5]\r\n )\r\n # For consistency with other lock-in instrument classes\r\n dac4 = aux_out_4\r\n\r\n aux_in_1 = Instrument.measurement(\r\n \"OAUX? 0\",\r\n \"\"\" Reads the Aux input 1 value in Volts with 1/3 mV resolution. \"\"\"\r\n )\r\n # For consistency with other lock-in instrument classes\r\n adc1 = aux_in_1\r\n\r\n aux_in_2 = Instrument.measurement(\r\n \"OAUX? 1\",\r\n \"\"\" Reads the Aux input 2 value in Volts with 1/3 mV resolution. \"\"\"\r\n )\r\n # For consistency with other lock-in instrument classes\r\n adc2 = aux_in_2\r\n\r\n aux_in_3 = Instrument.measurement(\r\n \"OAUX? 2\",\r\n \"\"\" Reads the Aux input 3 value in Volts with 1/3 mV resolution. \"\"\"\r\n )\r\n # For consistency with other lock-in instrument classes\r\n adc3 = aux_in_3\r\n\r\n aux_in_4 = Instrument.measurement(\r\n \"OAUX? 3\",\r\n \"\"\" Reads the Aux input 4 value in Volts with 1/3 mV resolution. 
\"\"\"\r\n )\r\n # For consistency with other lock-in instrument classes\r\n adc4 = aux_in_4\r\n\r\n def snap(self, val1=\"X\", val2=\"Y\", val3=None):\r\n \"\"\"retrieve 2 or 3 parameters at once\r\n parameters can be chosen by index, or enumeration as follows:\r\n\r\n +--------+-------------+------------------------+\r\n | index | enumeration | parameter |\r\n +========+=============+========================+\r\n | 0 | X | X output |\r\n +--------+-------------+------------------------+\r\n | 1 | Y | Y output |\r\n +--------+-------------+------------------------+\r\n | 2 | R | R output |\r\n +--------+-------------+------------------------+\r\n | 3 | THeta | θ output |\r\n +--------+-------------+------------------------+\r\n | 4 | IN1 | Aux In1 |\r\n +--------+-------------+------------------------+\r\n | 5 | IN2 | Aux In2 |\r\n +--------+-------------+------------------------+\r\n | 6 | IN3 | Aux In3 |\r\n +--------+-------------+------------------------+\r\n | 7 | IN4 | Aux In4 |\r\n +--------+-------------+------------------------+\r\n | 8 | XNOise | Xnoise |\r\n +--------+-------------+------------------------+\r\n | 9 | YNOise | Ynoise |\r\n +--------+-------------+------------------------+\r\n | 10 | OUT1 | Aux Out1 |\r\n +--------+-------------+------------------------+\r\n | 11 | OUT2 | Aux Out2 |\r\n +--------+-------------+------------------------+\r\n | 12 | PHAse | Reference Phase |\r\n +--------+-------------+------------------------+\r\n | 13 | SAMp | Sine Out Amplitude |\r\n +--------+-------------+------------------------+\r\n | 14 | LEVel | DC Level |\r\n +--------+-------------+------------------------+\r\n | 15 | FInt | Int. Ref. Frequency |\r\n +--------+-------------+------------------------+\r\n | 16 | FExt | Ext. Ref. Frequency |\r\n +--------+-------------+------------------------+\r\n\r\n :param val1: parameter enumeration/index\r\n :param val2: parameter enumeration/index\r\n :param val3: parameter enumeration/index (optional)\r\n\r\n Defaults:\r\n val1 = \"X\"\r\n val2 = \"Y\"\r\n val3 = None\r\n \"\"\"\r\n if val3 is None:\r\n return self.values(\r\n command=f\"SNAP? {val1}, {val2}\",\r\n separator=\",\",\r\n cast=float,\r\n )\r\n else:\r\n return self.values(\r\n command=f\"SNAP? 
{val1}, {val2}, {val3}\",\r\n separator=\",\",\r\n cast=float,\r\n )\r\n\r\n gettimebase = Instrument.measurement(\r\n \"TBSTAT?\",\r\n \"\"\"Returns the current 10 MHz timebase source.\"\"\"\r\n )\r\n extfreqency = Instrument.measurement(\r\n \"FREQEXT?\",\r\n \"\"\"Returns the external frequency in Hz.\"\"\"\r\n )\r\n detectedfrequency = Instrument.measurement(\r\n \"FREQDET?\",\r\n \"\"\"Returns the actual detected frequency in HZ.\"\"\"\r\n )\r\n get_signal_strength_indicator = Instrument.measurement(\r\n \"ILVL?\",\r\n \"\"\"Returns the signal strength indicator.\"\"\"\r\n )\r\n get_noise_bandwidth = Instrument.measurement(\r\n \"ENBW?\",\r\n \"\"\"Returns the equivalent noise bandwidth, in hertz.\"\"\"\r\n )\r\n # Display Commands\r\n front_panel = Instrument.control(\r\n \"DBLK?\", \"DBLK %i\",\r\n \"\"\"Turns the front panel blanking on(i=0) or off(i=1).\"\"\",\r\n validator=strict_discrete_set,\r\n values=ON_OFF_VALUES,\r\n map_values=True\r\n )\r\n screen_layout = Instrument.control(\r\n \"DLAY?\", \"DLAY %i\",\r\n \"\"\"A integer property that Sets the screen layout to trend(i=0), full strip chart\r\n history(i=1), half strip chart history(i=2), full FFT(i=3), half FFT(i=4) or big\r\n numerical(i=5).\"\"\",\r\n validator=strict_discrete_set,\r\n values=SCREEN_LAYOUT_VALUES,\r\n map_values=True\r\n )\r\n\r\n def screenshot(self):\r\n \"\"\"Take screenshot on device\r\n The DCAP command saves a screenshot to a USB memory stick.\r\n This command is the same as pressing the [Screen Shot] key.\r\n A USB memory stick must be present in the front panel USB port.\r\n \"\"\"\r\n self.write(\"DCAP\")\r\n\r\n parameter_DAT1 = Instrument.control(\r\n \"CDSP? 0\", \"CDSP 0, %i\",\r\n \"\"\"A integer property that assigns a parameter to data channel 1(green).\r\n This parameters can be set. Allowed values are:{}\"\"\".format(LIST_PARAMETER),\r\n validator=strict_discrete_set,\r\n values=range(0, 16)\r\n )\r\n parameter_DAT2 = Instrument.control(\r\n \"CDSP? 1\", \"CDSP 1, %i\",\r\n \"\"\"A integer property that assigns a parameter to data channel 2(blue).\r\n This parameters can be set. Allowed values are:{}\"\"\".format(LIST_PARAMETER),\r\n validator=strict_discrete_set,\r\n values=range(0, 16)\r\n )\r\n parameter_DAT3 = Instrument.control(\r\n \"CDSP? 2\", \"CDSP 2, %i\",\r\n \"\"\"A integer property that assigns a parameter to data channel 3(yellow).\r\n This parameters can be set. Allowed values are:{}\"\"\".format(LIST_PARAMETER),\r\n validator=strict_discrete_set,\r\n values=range(0, 16)\r\n )\r\n parameter_DAT4 = Instrument.control(\r\n \"CDSP? 3\", \"CDSP 3, %i\",\r\n \"\"\"A integer property that assigns a parameter to data channel 3(orange).\r\n This parameters can be set. Allowed values are:{}\"\"\".format(LIST_PARAMETER),\r\n validator=strict_discrete_set,\r\n values=range(0, 16)\r\n )\r\n strip_chart_dat1 = Instrument.control(\r\n \"CGRF? 0\", \"CGRF 0, %i\",\r\n \"\"\"A integer property that turns the strip chart graph of data channel 1 off(i=0) or on(i=1).\r\n \"\"\", # noqa: E501\r\n validator=strict_discrete_set,\r\n values=ON_OFF_VALUES,\r\n map_values=True\r\n )\r\n strip_chart_dat2 = Instrument.control(\r\n \"CGRF? 1\", \"CGRF 1, %i\",\r\n \"\"\"A integer property that turns the strip chart graph of data channel 2 off(i=0) or on(i=1).\r\n \"\"\", # noqa: E501\r\n validator=strict_discrete_set,\r\n values=ON_OFF_VALUES,\r\n map_values=True\r\n )\r\n strip_chart_dat3 = Instrument.control(\r\n \"CGRF? 
2\", \"CGRF 2, %i\",\r\n \"\"\"A integer property that turns the strip chart graph of data channel 1 off(i=0) or on(i=1).\r\n \"\"\", # noqa: E501\r\n validator=strict_discrete_set,\r\n values=ON_OFF_VALUES,\r\n map_values=True\r\n )\r\n strip_chart_dat4 = Instrument.control(\r\n \"CGRF? 3\", \"CGRF 3, %i\",\r\n \"\"\"A integer property that turns the strip chart graph of data channel 4 off(i=0) or on(i=1).\r\n \"\"\", # noqa: E501\r\n validator=strict_discrete_set,\r\n values=ON_OFF_VALUES,\r\n map_values=True\r\n )\r\n # Strip Chart commands\r\n horizontal_time_div = Instrument.control(\r\n \"GSPD?\", \"GSDP %i\",\r\n \"\"\"A integer property for the horizontal time/div according to the following table:{}\r\n \"\"\".format(LIST_HORIZONTAL_TIME_DIV),\r\n validator=strict_discrete_set,\r\n values=range(0, 16)\r\n )\r\n\r\n def __init__(self, adapter, name=\"Stanford Research Systems SR860 Lock-in amplifier\",\r\n **kwargs):\r\n super().__init__(\r\n adapter,\r\n name,\r\n **kwargs\r\n )\r\n","repo_name":"pymeasure/pymeasure","sub_path":"pymeasure/instruments/srs/sr860.py","file_name":"sr860.py","file_ext":"py","file_size_in_byte":24786,"program_lang":"python","lang":"en","doc_type":"code","stars":514,"dataset":"github-code","pt":"81"} +{"seq_id":"9707343305","text":"import numpy as np\nfrom scipy.sparse import lil_matrix, dok_matrix, save_npz, load_npz\nimport time\nfrom threading import Thread\n\nrank_filepath = 'C:/Users/achernyshev/Documents/la_project/pagerank_result.txt'\npages_filepath = 'C:/Users/achernyshev/Documents/la_project/pages.txt'\nm_filepath_pattern = 'C:/Users/achernyshev/Documents/la_project/m2-%d.npz'\nconvergence_threshold = 0.000_000_1\ntop_count = 20\n\ndef has_converged(x_next, x_prev):\n return all(abs(xi) <= convergence_threshold for xi in x_next-x_prev)\n\nmx_count = 100\npage_count = sum(1 for line in open(pages_filepath, encoding=\"utf8\"))\nstart_rank = 1 / page_count\nx = np.full(page_count, start_rank)\ncoef = 0.15\n\nmxs = []\nfor i in range(1, mx_count+1):\n A = load_npz(m_filepath_pattern % (i))\n mxs.append(A)\n print('Matrix %d (%s) loaded' % (i, str(A.shape)))\nstart = time.time()\n \nfor i in range(100):\n print(i, end=' ')\n xis = [(1-coef)*mx.dot(x) + coef/page_count for mx in mxs]\n x_next = np.concatenate((xis))\n x_next = x_next / np.linalg.norm(x_next, ord=1)\n if has_converged(x_next, x):\n break\n x = x_next\n \nmxs = []\nprint('\\nPage Rank vector converged with tolerance: %s' % convergence_threshold)\n\npages = [line.split()[1] for line in open(pages_filepath, encoding=\"utf8\")]\ntop_els = list(zip(x, pages))\ntop_els.sort(key=lambda el: el[0], reverse=True)\nwith open(rank_filepath, \"w+\", encoding=\"utf8\") as f:\n f.writelines(['%s\\n' % str(el) for el in top_els])\nprint('Execution completed in: %d sec' % (time.time() - start))","repo_name":"Agney19/wikipedia_pagerank","sub_path":"calculate_pagerank.py","file_name":"calculate_pagerank.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23549077336","text":"import os, pip\n\n\ndef __read(fname):\n try:\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n except IOError:\n return ''\n\ndef install(package):\n pip.main(['install', package])\n\n\ninstall_requires = __read('requirements.txt').split('\\n')\n\nfor package in install_requires:\n 
install(package)\n\n","repo_name":"yernarkaz/adsw_service","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11347282893","text":"import numpy as np\nimport unittest\n\nfrom komorebi.libs.utilities.array_utils import ensure_samples_match, partition_indices\n\nclass TestArrayUtils(unittest.TestCase):\n \"\"\"Tests for array utils.\"\"\"\n\n def test_ensure_samples_match_raises(self):\n \"\"\"Ensure samples raises for mismatch in array sizes.\"\"\"\n self.assertRaises(ValueError, ensure_samples_match, np.arange(100), np.arange(4))\n\n def test_ensure_samples_returns_correct_samples(self):\n \"\"\"Ensure samples should return number of samples for matching array sizes along axis 0.\"\"\"\n expected_samples = 100\n dummy_array = np.arange(expected_samples)\n self.assertEqual(ensure_samples_match(dummy_array, dummy_array), expected_samples)\n\n def test_partition_indices_returns_correct_indices(self):\n number_of_samples = 3\n partition_size = 2\n\n # [0, 1, 2] -> [ [0, 1], [2] ]\n expected_indices = [np.array([0, 1]), np.array([2])]\n actual_indices, _ = partition_indices(np.arange(number_of_samples), partition_size)\n\n self.assertEqual(len(actual_indices), len(expected_indices))\n for (ai, ei) in zip(actual_indices, expected_indices):\n np.testing.assert_array_equal(ai, ei)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"andylee024/komorebi","sub_path":"libs/utilities/tests/test_array_utils.py","file_name":"test_array_utils.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18701726757","text":"from collections import defaultdict\n# 접시 n, 초밥 d, 연속먹기 k, 쿠폰 번호 c\nn, d, k, c = map(int, input().split())\nsushi = [int(input()) for _ in range(n)]\n\ncase = set()\ncase_cnt = defaultdict(int)\nanswer = 0\n\nleft = 0\nright = k-1\n\nfor i in range(k):\n case.add(sushi[i])\n case_cnt[sushi[i]] += 1\n\nfor i in range(n):\n case_cnt[sushi[left]] -= 1\n if case_cnt[sushi[left]] == 0:\n case.remove(sushi[left])\n left = (left+1) % n\n\n right = (right+1) % n\n if sushi[right] not in case:\n case.add(sushi[right])\n case_cnt[sushi[right]] += 1\n\n if c not in case:\n total_case = len(case)+1\n else:\n total_case = len(case)\n \n answer = max(answer, total_case)\nprint(answer)","repo_name":"dev-dain/algorithm-study","sub_path":"source/suzy/week6/A2531.py","file_name":"A2531.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"25241689900","text":"import nextcord, asyncio, os, io, contextlib, textwrap, traceback\nfrom typing import Any, Optional\n\nfrom nextcord.ext import commands\nfrom nextcord.ui import Modal, TextInput\n\nfrom Misc.utilidades import Wer, Emojis\nfrom Misc.messages import DeleteMessageSlash\n\nclass SnekBox_Eval(nextcord.ui.Modal):\n def __init__(self) -> None:\n super().__init__(title=\"Avalia seu código\", custom_id=\"evaluate_code\")\n\n self._last_result: Optional[Any] = None\n\n self.add_item(\n nextcord.ui.TextInput(\n label=\"Seu código\",\n placeholder=\"print('Hello')\",\n custom_id=\"Código avaliado\",\n style=nextcord.TextInputStyle.paragraph,\n min_length=10\n ),\n )\n\n async def callback(self, inter: nextcord.Interaction) -> None:\n\n view = DeleteMessageSlash(inter)\n\n vars = {\n 'bot' : inter.client,\n 'ctx': 
commands.Context,\n 'interaction': inter,\n 'channel': inter.channel,\n 'author': inter.user,\n 'guild': inter.guild,\n 'message': inter.message,\n '_': self._last_result,\n 'nextcord': nextcord\n \n }\n\n vars.update(globals())\n\n \n embed = nextcord.Embed(title=\"Seu código\", description=\"✅ Seu código foi avaliado e aqui está o julgamento:\", color=0x00FF00)\n code = self.children[0].value\n to_compile = f'async def func():\\n{textwrap.indent(code, \" \")}'\n stdout = io.StringIO()\n\n try:\n exec(to_compile, vars)\n except Exception as e:\n return await inter.response.send_message(f'```py\\n{e.__class__.__name__}: {e}\\n```') \n\n func = vars['func'] \n\n try:\n with contextlib.redirect_stdout(stdout):\n res = await func()\n\n except Exception as e:\n value = stdout.getvalue()\n await inter.response.send_message(f'```py\\n{value}{traceback.format_exc()}\\n```')\n \n else:\n value = stdout.getvalue()\n try:\n await inter.message.add_reaction(Emojis.check)\n except:\n pass\n\n if Wer.token in value:\n value = \":warning: Nenhuma informação sensível foi encontrada no seu código.\"\n\n\n embed.add_field(name=\"Input Code\", value=f\"```py\\n{value}\\n```\", inline=False)\n\n if res is None:\n if not value:\n embed.add_field(name=\"Código avaliado:\", value=f\"{Emojis.decline} A execução do código não retornou nada.\", inline=False) \n else:\n self._last_result = res\n embed.add_field(f'```py\\n{value}{res}\\n```')\n await inter.response.send_message(embed=embed,view=view)\n\n async def on_error(self, error, interaction: nextcord.Interaction):\n view = DeleteMessageSlash(interaction)\n embed = nextcord.Embed(title=\"Code Status\", description=\":x: Um erro ocorreu.\", color=0xFF0000)\n embed.add_field(name=\":warning: O erro\", value=f\"```{error}```\", inline=False)\n await interaction.response.send_message(embed=embed,view=view) \n \nclass Eval(commands.Cog, description='Avalia seu código.'):\n\n \n COG_EMOJI = \"💻\"\n\n def __init__(self, bot):\n self.bot = bot\n \n\n @nextcord.slash_command(name=\"eval\", description=\"Avalia o código Python\")\n async def eval(self, interaction: nextcord.Interaction):\n await interaction.response.send_modal(modal=SnekBox_Eval())\n \n \n","repo_name":"WerG0D/WerBot---A-nextcord-bot","sub_path":"all/cogs/Eval/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"25210515484","text":"import superdesk\n\nfrom flask import request, current_app as app, json\nfrom superdesk.resource import build_custom_hateoas\n\nfrom apps.archive.archive import ArchiveResource, ArchiveService\nfrom apps.archive.common import CUSTOM_HATEOAS\n\n\ndef elastic_filter(req=None):\n where = request.args.get(\"where\")\n assert where, \"where not set\"\n\n params = json.loads(where)\n guid = params[\"guid\"]\n uri = params.get(\"uri\") or guid\n\n query = {\n \"bool\": {\n \"should\": [\n {\"term\": {\"refs.uri\": uri}},\n {\"term\": {\"refs._id\": guid}},\n {\"term\": {\"refs.guid\": guid}},\n ],\n },\n }\n\n LINKS_HOURS = app.config.get(\"LINKS_MAX_HOURS\")\n if LINKS_HOURS:\n query[\"bool\"].update(\n {\n \"minimum_should_match\": 1,\n \"must\": {\n \"range\": {\n \"versioncreated\": {\n \"gte\": \"now-{}h\".format(int(LINKS_HOURS)),\n },\n },\n },\n }\n )\n\n return query\n\n\nclass LinksResource(ArchiveResource):\n item_methods = []\n versioning = False\n resource_methods = [\"GET\"]\n datasource = ArchiveResource.datasource.copy()\n 
datasource.update(\n {\n \"source\": \"archive\",\n \"elastic_filter_callback\": elastic_filter,\n \"elastic_filter\": {\"bool\": {\"must_not\": {\"term\": {\"version\": 0}}}},\n }\n )\n\n\nclass LinksService(ArchiveService):\n def enhance_items(self, items):\n super().enhance_items(items)\n for item in items:\n build_custom_hateoas(CUSTOM_HATEOAS, item)\n\n def get(self, req, lookup):\n req.where = None # it's handled in the elastic_filter\n return super().get(req, lookup)\n\n\ndef init_app(_app) -> None:\n superdesk.register_resource(\"links\", LinksResource, LinksService, _app=_app)\n","repo_name":"superdesk/superdesk-core","sub_path":"apps/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"24615073139","text":"from functools import wraps\nfrom webbrowser import get as browser\n\nfrom .Captcha import Captcha\n\n\nCAPTCHA_ERROR_CODE = 14\n\nchrome = browser('google-chrome')\n\nN_MAX_MODEL_ATTEMPTS = 10\ncurrent_attempt = 0\n\n\nclass CaptchaNeeded(Exception):\n def __init__(self, sid: str, image: str):\n super().__init__(f'Captca needed (see {image}, send answer to {sid})')\n\n self.sid = sid\n self.image = image\n\n\ndef type_key(url: str):\n chrome.open_new_tab(url)\n return input(f'Captcha needed: {url}\\n')\n\n\ndef get_key_using_model(url: str):\n global current_attempt\n\n if current_attempt > N_MAX_MODEL_ATTEMPTS:\n return type_key(url)\n\n print(f'Trying to solve captcha automatically {current_attempt}...')\n current_attempt += 1\n\n return Captcha(url = url).text\n # raise NotImplementedError(f'Cannot solve captcha using model: {url}')\n\n\ndef get_key_from_external_service(url: str):\n raise NotImplementedError(f'External service for captcha decoding is not configured: {url}')\n\n\ndef handle_captcha(get_key: callable = type_key):\n def handle_captcha_(send_request):\n @wraps(send_request)\n def handle_captcha__(*args, **kwargs):\n global current_attempt\n\n while True:\n try:\n result = send_request(*args, **kwargs)\n current_attempt = 0\n return result\n except CaptchaNeeded as error:\n kwargs['captcha_sid'] = error.sid\n kwargs['captcha_key'] = get_key(error.image)\n\n return handle_captcha__(*args, **kwargs)\n\n return handle_captcha__\n\n return handle_captcha_\n\n\ndef try_raise_captcha_error(response: dict):\n error = response.get('error')\n\n if error is not None and error.get('error_code') == CAPTCHA_ERROR_CODE:\n raise CaptchaNeeded(sid = int(error['captcha_sid']), image = error['captcha_img'])\n\n\ndef try_add_captcha_params(request: dict, captcha_sid: int = None, captcha_key: str = None):\n if captcha_sid is not None and captcha_key is not None:\n request['captcha_sid'] = captcha_sid\n request['captcha_key'] = captcha_key\n\n return request\n","repo_name":"zeionara/cold","sub_path":"cold/util/captcha.py","file_name":"captcha.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17138476293","text":"from resticweb import db, bcrypt\nfrom resticweb.models.general import CredentialGroup, CredentialStore, SysVars, RepositoryType\nfrom resticweb.dictionary.resticweb_constants import System, Credential\nfrom resticweb.blueprints.users.models import User\nfrom sqlalchemy.exc import OperationalError\nimport logging\nfrom resticweb.dictionary.resticweb_exceptions import UBInitFailure\n\n\ndef init_db():\n logger = 
logging.getLogger(\"mainLogger\")\n try:\n db_initialized = SysVars.query.filter_by(\n var_name=System.DB_INITIALIZED_VAR_NAME).first()\n except OperationalError as e:\n raise UBInitFailure(\"init_db was called before the databases/tables \"\n f\"were created. ---- {e}\")\n if db_initialized is None:\n pass # db not initialized so we can proceed with initialization\n elif db_initialized.var_data == \"1\":\n return # we can safely return if the db has been initialized\n\n # initialize the storage for the credential database encryption key\n credential_key_group = CredentialGroup(\n id=0,\n service_name=Credential.CREDENTIAL_KEY_GROUP_NAME)\n credential_key = CredentialStore(\n group_id=0,\n credential_role=Credential.CREDENTIAL_KEY_ROLE_NAME,\n credential_data=\"\"\n )\n db.session.add(credential_key_group)\n db.session.add(credential_key)\n\n # initialize the indicator whether or not credential database is encrypted\n cred_db_encrypted = SysVars(\n var_name=Credential.CREDENTIAL_DB_ENCRYPTED,\n var_data=\"0\"\n )\n db.session.add(cred_db_encrypted)\n\n # add a default username and password for logging in\n user = User(\n username=\"admin\",\n password=bcrypt.generate_password_hash(\"password\").decode('utf-8')\n )\n db.session.add(user)\n\n # we'll try committing all those objects that we have just added\n try:\n db.session.commit()\n except OperationalError as e:\n logger.error(f\"Failed to initialize the database: {e}\")\n raise UBInitFailure\n\n # we can finally declare the database as initialized\n db_initialized = SysVars(\n var_name=System.DB_INITIALIZED_VAR_NAME,\n var_data=\"1\"\n )\n db.session.add(db_initialized)\n\n # add default repository types\n repository_type_type = RepositoryType(\n id=1,\n name=\"Local Filesystem\",\n type=\"local\",\n internal_binding='local',\n description=\"Location type referencing a place in the filesystem on the same machine as the server\")\n db.session.add(repository_type_type)\n\n repository_type_type = RepositoryType(\n id=2,\n name=\"Amazon S3\",\n type=\"cloud\",\n internal_binding='amazons3',\n description=\"Repository type referencing a place in an Amazon S3 bucket.\")\n db.session.add(repository_type_type)\n\n repository_type_type = RepositoryType(\n id=3,\n name=\"RClone\",\n type=\"cloud\",\n internal_binding='rclone',\n description=\"Placeholder for RClone repository types. Feel free to create custom ones that correspond to your repo locations.\")\n db.session.add(repository_type_type)\n\n db.session.commit()","repo_name":"XXL6/resticweb","sub_path":"resticweb/init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1611648342","text":"\"\"\"\"\"\n---------------------------------------------------------------------------------------------------------------\nRead in a list of words from File. Then prompt the user to enter a word to\nsearch the list. 
The program reports if the search word is found in the list.\n---------------------------------------------------------------------------------------------------------------\n\"\"\"\n\n\nclass BinarySearch:\n @staticmethod\n def searching():\n # reading the file\n f = open('maninums', \"r\")\n for i in f:\n # splitting the string and storing it into the list\n arr = i.split(\" \")\n arr.sort()\n print(arr)\n s = str(input(\"Enter the string to search : \"))\n # taking the lower boundary as '0' and the upper boundary as the length of the list\n low = 0\n u = int(len(arr) - 1)\n while low <= u:\n mid = int((low + (u - 1) / 2))\n if s == arr[u]:\n # if the element is present at the end of the list it will print and exit from the loop\n print(\"Fount at index :\", u)\n break\n # if the element is present at the mid of the list it will print and exit from the loop\n if s == arr[mid]:\n print(\"Fount at index :\", mid)\n break\n if arr[mid] < s:\n # if the middle element is greater than the given element\n # it will neglect the left part and moves to right\n # Lower boundary changes to mid\n low = mid + 1\n elif arr[mid] > s:\n # if the middle element is less than the given element it will neglect the right part and moves to left\n # upper boundary values changes to mid\n u = mid - 1\n else:\n # if the element is not found then it will print not found\n print(\"Not Found\")\n\n\nBinarySearch.searching()","repo_name":"SubbuDevasani/Programs","sub_path":"2.Algorithm Programs/BinarSearch.py","file_name":"BinarSearch.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6431308064","text":"import json\nimport tkinter as tk\nfrom tkinter import ttk\n\nfrom src.frames.views.modes import ViewModeVPC\nfrom src.objects.modes import VPC\n\nfrom .mode import FrameMode\n\n\nclass FrameModeVPC(FrameMode, ttk.Frame):\n def __init__(self, master:tk.Widget):\n super().__init__(master)\n\n self.ui = ViewModeVPC(self)\n self.set_default()\n\n\n def set_default(self):\n \"\"\"\n Sets the default values to the variables and sets the spinbox limits.\n \"\"\"\n\n with open(\"resources/modes.json\") as fid:\n vpc = json.load(fid)[\"vpc\"]\n\n start, from_, to, step = vpc[\"peep\"]\n self.ui.var_peep.set(start)\n self.ui.peep_spin.configure(from_=from_, to=to, increment=step)\n start, from_, to, step = vpc[\"p support\"]\n self.ui.var_p_support.set(start)\n self.ui.p_support_spin.configure(from_=from_, to=to, increment=step)\n start, from_, to, step = vpc[\"ti\"]\n self.ui.var_ti.set(start)\n self.ui.ti_spin.configure(from_=from_, to=to, increment=step)\n start, from_, to, step = vpc[\"br\"]\n self.ui.var_br.set(start)\n self.ui.br_spin.configure(from_=from_, to=to, increment=step)\n start, from_, to, step = vpc[\"trigger\"]\n self.ui.var_trigger.set(start)\n self.ui.trigger_spin.configure(from_=from_, to=to, increment=step)\n\n\n def get(self):\n \"\"\"\n Returns the VPC parameters.\n \"\"\"\n\n return VPC(\n peep=self.ui.var_peep.get(),\n p_support=self.ui.var_p_support.get(),\n ti=self.ui.var_ti.get(),\n br=self.ui.var_br.get(),\n trigger=self.ui.var_trigger.get()\n )\n\n\n def get_dict(self):\n \"\"\"\n Returns the mode parameters as a dict.\n\n Returns:\n dict: VAC parameters\n \"\"\"\n\n return {\n \"peep\": self.ui.var_peep.get(),\n \"p_support\": self.ui.var_p_support.get(),\n \"ti\": self.ui.var_ti.get(),\n \"br\": self.ui.var_br.get(),\n \"trigger\": self.ui.var_trigger.get()\n }\n\n\n def set(self, 
**kwargs):\n \"\"\"\n Sets the VPC parameters.\n \"\"\"\n\n self.ui.var_peep.set(kwargs[\"peep\"])\n self.ui.var_p_support.set(kwargs[\"p_support\"])\n self.ui.var_ti.set(kwargs[\"ti\"])\n self.ui.var_br.set(kwargs[\"br\"])\n self.ui.var_trigger.set(kwargs[\"trigger\"])\n","repo_name":"deplanty/virtual-respirator","sub_path":"src/frames/ctrl/modes/vpc.py","file_name":"vpc.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"6427135703","text":"from django.shortcuts import render\nfrom rest_framework import generics, status\nfrom .serializers import requestSerializer\nfrom .models import SimulationRequest\nfrom .models import RealizationsStatuses\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.contrib.auth.models import User\nimport time\nimport uuid \nfrom django.http import HttpResponse\nfrom django.utils import timezone\n# ---write endpoints here---\n\n### authentication endpoints ###\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import JsonResponse\nfrom django.middleware.csrf import get_token\nfrom django.views.decorators.http import require_POST\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.views import APIView\nimport json\n\nfrom django.contrib.auth.models import User\nfrom .serializers import RegisterSerializer\nfrom rest_framework import generics\n\n# statistics utilities\nfrom . import demogorgn_backend\nimport datetime \nimport os\nimport base64\n\nSITE_ROOT = os.path.dirname(os.path.realpath(__file__))\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nfrom django.conf import settings\nimport subprocess\n\nclass ListUserSimulationsView(APIView):\n # Ensure only authenticated users can access this view\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n # Filter SimulationRequest objects by the authenticated user\n user_simulations = SimulationRequest.objects.filter(user=request.user)\n\n # Serialize the queryset\n serializer = requestSerializer(user_simulations, many=True)\n\n # Return the serialized data\n return Response(serializer.data)\n\n\nclass RegisterView(generics.CreateAPIView):\n queryset = User.objects.all()\n serializer_class = RegisterSerializer\n\n\ndef get_csrf(request):\n response = JsonResponse({'detail': 'CSRF cookie set'})\n response['X-CSRFToken'] = get_token(request)\n return response\n\n\n@require_POST\ndef login_view(request):\n data = json.loads(request.body)\n username = data.get('username')\n password = data.get('password')\n\n if username is None or password is None:\n return JsonResponse({'detail': 'Please provide username and password.'}, status=400)\n\n user = authenticate(username=username, password=password)\n\n if user is None:\n return JsonResponse({'detail': 'Invalid credentials.'}, status=400)\n\n login(request, user)\n return JsonResponse({'detail': 'Successfully logged in.'})\n\n\ndef logout_view(request):\n if not request.user.is_authenticated:\n return JsonResponse({'detail': 'You\\'re not logged in.'}, status=400)\n\n logout(request)\n return JsonResponse({'detail': 'Successfully logged out.'})\n\n\nclass SessionView(APIView):\n authentication_classes = [SessionAuthentication, BasicAuthentication]\n permission_classes = [IsAuthenticated]\n\n @staticmethod\n def get(request, format=None):\n return 
JsonResponse({'isAuthenticated': True})\n\n\nclass WhoAmIView(APIView):\n authentication_classes = [SessionAuthentication, BasicAuthentication]\n permission_classes = [IsAuthenticated]\n\n @staticmethod\n def get(request, format=None):\n return JsonResponse({'username': request.user.username, 'email': request.user.email})\n\nclass listView(generics.ListAPIView):\n queryset = SimulationRequest.objects.all()\n serializer_class = requestSerializer\n \nclass requestView(generics.CreateAPIView):\n queryset = SimulationRequest.objects.all()\n serializer_class = requestSerializer\n\n\n### Simulation api endpoints ###\n\nclass CreateSimulationView(APIView):\n serializer_class = requestSerializer\n\n def post(self, request, format=\"None\"):\n serializer = self.serializer_class(data=request.data)\n \n # TODO: Add error handling for type mismatches, etc.\n maxx = float(serializer.initial_data['maxx'])\n maxy = float(serializer.initial_data['maxy'])\n minx = float(serializer.initial_data['minx'])\n miny = float(serializer.initial_data['miny'])\n cellSize = int(serializer.initial_data['cellSize']) # Resolution\n realizations = int(serializer.initial_data['realizations'])\n email = serializer.initial_data['email']\n user = User.objects.get(id=request.user.id)\n # saves simulation request\n \n guid = str(uuid.uuid4())\n \n print(f\"Starting simulation {guid}\")\n # TODO: Hard coding K and Rad values for now - figure out long term solution\n #k = 100\n #rad = 50000\n req = SimulationRequest(user=user, maxx=maxx, maxy=maxy, minx=minx, miny=miny, cellSize=cellSize, realizations=realizations, email=email, guid=guid)\n req.save()\n for x in range(0,realizations):\n realization = RealizationsStatuses(guid = req,status= \"PENDING\", last_update = timezone.now(), rid = x)\n realization.save()\n \n script_path = os.path.join(settings.BASE_DIR,'scripts','simulate.py')\n output_path = os.path.join(settings.BASE_DIR,'api','output')\n command_log_path = os.path.join(settings.BASE_DIR,'api','output',guid,'output.log')\n datafile_path = os.path.join(settings.BASE_DIR,'api','data','PIG_data.csv')\n dbfile_path = os.path.join(settings.BASE_DIR,'db.sqlite3') \n \n os.makedirs(os.path.join(settings.BASE_DIR,'api','output',guid))\n if settings.DEV_MODE:\n command = [\"python3\",script_path,\"--output_dir\",output_path,\"--datafile\",datafile_path,\"--guid\",guid,\"--res\",str(cellSize),\"--num_realizations\",str(realizations), \"--num_cpus\",str(8),\"--dbfile\",dbfile_path, \"--xmin\",str(minx),\"--xmax\", str(maxx),\"--ymin\",str(miny),\"--ymax\",str(maxy) ]\n with open(command_log_path,'w') as out_file:\n process = subprocess.Popen(command, stdout=out_file, stderr=out_file, text=True)\n else:\n # Build up an array where each element is a string representing a line from the SLURM job shell script for this request\n slurm_script_contents = []\n slurm_script_contents.append(\"#!/bin/sh\")\n slurm_script_contents.append(\"#SBATCH --cpus-per-task=12\")\n slurm_script_contents.append(\"#SBATCH --mem=120gb\")\n slurm_script_contents.append(\"#SBATCH --time=24:00:00\")\n slurm_script_contents.append(f\"#SBATCH --job-name=gsim_{guid}\")\n slurm_script_contents.append(\"#SBATCH --mail-type=ALL\")\n #slurm_email_account = \"\"\n #slurm_script_contents.append(f\"#SBATCH --mail-user={slurm_email_account}\")\n slurm_script_contents.append(\"#SBATCH --output=\" + os.path.join(settings.BASE_DIR,'api','output',guid,'serial_%j.out') )\n slurm_script_contents.append(\"pwd; hostname; date\")\n miniconda_interpreter = 
\"/pubapps/emackie/miniconda3/envs/demogorgn_env/bin/python3\"\n datafile_path = \"/pubapps/emackie/data/total_demo_gl_not_gridded.csv\" \n command = [miniconda_interpreter,script_path,\"--output_dir\",output_path,\"--datafile\",datafile_path,\"--guid\",guid,\"--res\",str(cellSize),\"--num_realizations\",str(realizations), \"--num_cpus\",str(8),\"--dbfile\",dbfile_path ,\"--xmin\",str(minx),\"--xmax\", str(maxx),\"--ymin\",str(miny),\"--ymax\",str(maxy) ]\n command_str = \" \".join(command)\n slurm_script_contents.append(command_str)\n slurm_script_contents.append(\"date\")\n \n # Loop through the array and write out the shell script\n slurm_script_path = os.path.join(settings.BASE_DIR,'api','output',guid,'slurm_script.sh')\n with open(slurm_script_path,\"w\") as slurm_script:\n \n for line in slurm_script_contents:\n slurm_script.write(line + \"\\n\")\n # Initiate the SLURM job. The SLURM Job ID will be stored in file at location command_log_path\n with open(command_log_path,'w') as out_file:\n command = [\"sbatch\",slurm_script_path]\n process = subprocess.Popen(command, stdout=out_file, stderr=out_file, text=True)\n \n return Response({\"guid\":guid})\n \nclass SimulationImageEndpoint(APIView):\n # Ensure only authenticated users can access this view\n permission_classes = [IsAuthenticated]\n\n def get(self, request, guid, realization):\n try:\n # Query for the SimulationRequest object with the provided GUID for the authenticated user\n simulation_request = SimulationRequest.objects.get(guid=guid)\n \n maxRealization = simulation_request.realizations-1\n \n if realization > maxRealization or realization < 0:\n return Response({\"error\": \"Invalid Realization Number\"}, status=status.HTTP_404_NOT_FOUND)\n \n # Construct the path to the PNG file (you may need to adjust this based on your actual directory structure)\n #png_path = os.path.join(SITE_ROOT, f\"{guid}/{realization}.png\")\n png_path = os.path.join(SITE_ROOT, \"output\")\n png_path = os.path.join(png_path, f\"{guid}\")\n png_path = os.path.join(png_path,str(realization))\n png_path = os.path.join(png_path,\"plot.png\")\n \n # Check if the PNG file exists\n if not os.path.exists(png_path):\n return Response({\"error\": \"PNG file not found\"}, status=status.HTTP_404_NOT_FOUND)\n \n # Read the PNG file and convert it to a base64 encoded string\n with open(png_path, \"rb\") as png_file:\n base64_encoded_png = base64.b64encode(png_file.read()).decode(\"utf-8\")\n \n # Return the base64 encoded string as the response\n return Response({\"base64_image\": base64_encoded_png})\n\n except SimulationRequest.DoesNotExist:\n return Response({\"error\": \"Simulation request not found\"}, status=status.HTTP_404_NOT_FOUND)\n \nclass SimulationCSVEndpoint(APIView):\n # Ensure only authenticated users can access this view\n permission_classes = [IsAuthenticated]\n\n def get(self, request, guid, realization):\n try:\n # Query for the SimulationRequest object with the provided GUID for the authenticated user\n simulation_request = SimulationRequest.objects.get(guid=guid)\n \n maxRealization = simulation_request.realizations-1\n \n if realization > maxRealization or realization < 0:\n return Response({\"error\": \"Invalid Realization Number\"}, status=status.HTTP_404_NOT_FOUND)\n \n # Construct the path to the CSV file (this needs adjustment based on the actual directory structure)\n csv_path = os.path.join(SITE_ROOT, \"output\", f\"{guid}\", str(realization), \"sim.csv\")\n \n # Check if the CSV file exists\n if not os.path.exists(csv_path):\n 
return Response({\"error\": \"CSV file not found\"}, status=status.HTTP_404_NOT_FOUND)\n \n # Read the CSV file\n with open(csv_path, \"r\") as csv_file:\n csv_content = csv_file.read()\n\n # Create the response with CSV content and headers for file download\n response = HttpResponse(csv_content, content_type='text/csv')\n response['Content-Disposition'] = f'attachment; filename=\"{guid}_{realization}.csv\"'\n \n return response\n\n except SimulationRequest.DoesNotExist:\n return Response({\"error\": \"Simulation request not found\"}, status=status.HTTP_404_NOT_FOUND)\n \n \nclass GetStatusView(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request, guid):\n # Query the RealizationsStatuses model for records matching the given guid\n statuses = RealizationsStatuses.objects.filter(guid=guid)\n\n # Construct the response list\n response_list = []\n for status in statuses:\n response_list.append({\n \"guid\": guid, \n \"rid\": status.rid,\n \"timestamp\": timezone.localtime(status.last_update).isoformat(), # Format as ISO 8601 string\n \"status\": status.status\n })\n\n # Return the JsonResponse\n return JsonResponse({'statuses': response_list})\n\nclass CancelRealizationGUIDView(APIView):\n permission_classes = [IsAuthenticated]\n\n def delete(self, request, guid):\n # Retrieve all realizations with the given guid\n realizations = RealizationsStatuses.objects.filter(guid=guid)\n\n if not realizations.exists():\n # If no realizations are found for the guid, return a 404 response\n raise HttpResponse(\"Realization not found\",status=404)\n\n # Iterate over the realizations and update their status if applicable\n for realization in realizations:\n if realization.status not in ['COMPLETE','CANCELLED']:\n realization.status = 'CANCELLED'\n realization.last_update = timezone.now()\n realization.save()\n\n return HttpResponse('All applicable realizations cancelled', status=200)\n\nclass CancelRealizationGUIDRIDView(APIView):\n permission_classes = [IsAuthenticated]\n\n def delete(self, request, guid, rid):\n try:\n # Retrieve the specific realization\n realization = RealizationsStatuses.objects.get(guid=guid, rid=rid)\n\n # Check if the status is neither 'COMPLETE' nor already 'CANCELLED'\n if realization.status not in ['COMPLETE', 'CANCELLED']:\n # Update the status to 'CANCELLED'\n realization.status = 'CANCELLED'\n realization.last_update = timezone.now()\n realization.save()\n return HttpResponse('Realization Cancelled', status=200)\n else:\n # If the realization is already 'COMPLETE' \n return HttpResponse('Realization cannot be cancelled', status=400)\n \n except RealizationsStatuses.DoesNotExist:\n # If the guid,rid tuple does not exist, return a 404 response\n raise HttpResponse(\"Realization not found\",status=404)\n\nclass LookupRequestView(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request, guid):\n #guid = request.query_params.get('guid')\n print(f\"Getting info for guid {guid}\")\n if not guid:\n return Response({\"error\": \"GUID parameter is required.\"}, status=400)\n\n try:\n simulation_request = SimulationRequest.objects.get(guid=guid)\n except SimulationRequest.DoesNotExist:\n return Response({\"error\": \"Simulation request not found.\"}, status=404)\n\n serializer = requestSerializer(simulation_request)\n return 
Response(serializer.data)","repo_name":"matthibbs7/DEMOGORGN-web","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3980570031","text":"\"\"\"\n [MOST EFFICIENT APPROACH FOR THIS PRORAM]\n\n An Efficient Solution can print all triplets in O(k) time where k is number of triplets printed. \n The idea is to use square sum relation of Pythagorean triplet, i.e., addition of squares of a and \n b is equal to square of c, we can write these number in terms of m and n such that,\n\n a = m2 - n2\n b = 2 * m * n\n c = m2 + n2\n\nbecause,\n a2 = m4 + n4 – 2 * m2 * n2\n b2 = 4 * m2 * n2\n c2 = m4 + n4 + 2* m2 * n2\n We can see that a2 + b2 = c2, so instead of iterating for a, b and c we can iterate for m and n and \n can generate these triplets.\n\"\"\"\n\n\ndef pythagoreanTriplets(limits):\n '''\n docstring\n ''' \n c, m = 0, 2\n\n # iterate until given limit is reached\n while c < limits:\n\n # iterate from n to m - 1\n for n in range(1, m):\n print(f\"m is {m} and n is {n}\") \n\n # now using square sum relation of Pythagorean triplet\n # for any value of n & m\n a = m ** 2 - n ** 2 \n b = 2 * m * n\n c = m **2 + n **2\n\n # but if c is greater than given limit it means c must not be included and break the loop\n if c > limits:\n break\n print(a, b, c)\n m += 1\n \n\nif __name__ == \"__main__\":\n # write your driver code here.\n n = int(input(\"Enter the value of limit: \"))\n pythagoreanTriplets(n)\n","repo_name":"AnshumanSinghh/Updated-Code-Practice","sub_path":"Code_for_wipro/pyth_trplt_eff.py","file_name":"pyth_trplt_eff.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27113276053","text":"def solution(s, skip, index):\n alpha = ['a','b','c','d','e','f','g','h','i','j','k','l','m', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',\n 'v', 'w', 'x', 'y', 'z']\n for sk in skip:\n alpha.remove(sk)\n \n answer = ''\n \n for ss in s:\n answer += alpha[(alpha.index(ss) + index) % len(alpha)] \n \n return answer","repo_name":"HaneulJung/Programmers","sub_path":"Programmers/Lv. 
1/둘만의 암호.py","file_name":"둘만의 암호.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74649384584","text":"from django.urls import path\nfrom boards import views as board_views\n\nurlpatterns = [\n # path('', board_views.boards, name = 'boards'),\n path('', board_views.BoardList.as_view(), name = 'boards'),\n # path('boards//', board_views.board_threads, name = 'board_threads'),\n path('/', board_views.ThreadList.as_view(), name = 'board_threads'),\n path('/new/', board_views.NewThread.as_view(), name = 'new_thread'),\n\n # comment urls\n path('/threads//', board_views.CommentList.as_view(), name = 'view_thread'),\n path('/threads//new_parent_comment/', board_views.NewParentComment.as_view(), name = 'new_parent_comment'),\n path('/threads//comments//edit/', board_views.EditComment.as_view(), name = 'edit_comment'),\n path('/threads//comments//reply/', board_views.ReplyComment.as_view(), name = 'reply_comment'),\n path('/threads//comments//', board_views.ViewComment.as_view(), name = 'view_comment'),\n]","repo_name":"hosstay/login-example-DP","sub_path":"boards/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9713091930","text":"Final = []\r\ndef printPaths(i,j,mat,dir,route = []):\r\n if i >= row or j >= clom:\r\n return \r\n route.append(dir)\r\n if i == row-1 and j == clom-1:\r\n temp = route[1:]\r\n Final.append(temp)\r\n if mat[i][j] == 1: \r\n printPaths(i+1,j,mat,'D')\r\n printPaths(i,j+1,mat,'F')\r\n route.pop()\r\n\r\n \r\nmat = [\r\n [1, 0, 0, 0],\r\n [1, 1, 0, 1],\r\n [1, 1, 0, 0],\r\n [0, 1, 1, 1]\r\n]\r\n\r\nrow = len(mat)\r\nclom = len(mat[0])\r\nprintPaths(0,0,mat,'S')\r\nprint(Final)","repo_name":"youssefkhalil320/AlgorithmAnalysisNotes-Algorithms","sub_path":"RateMaze.py","file_name":"RateMaze.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"27076936843","text":"f = open(\"loc-gowalla_edges.txt\", \"r\")\n\nedges = {}\n\nfor x in f:\n a, b = x.split(\"\\t\")\n a = int(a)\n if len(b) > 1:\n b = int(b[0])\n else:\n b = int(b)\n #print(a, b)\n if b not in edges:\n if a not in edges:\n edges[a] = []\n edges[a].append(b)\n elif a not in edges[b]:\n if a not in edges:\n edges[a] = []\n edges[a].append(b)\n\ng = open(\"loc-gowalla_edges-repaired.txt\", \"w\")\nfor v in range(196591):\n for adj in edges[v]:\n g.write(f\"{v}\\t{adj}\\n\")","repo_name":"Cheetar/HPC","sub_path":"brandes/preprocess_gowalla.py","file_name":"preprocess_gowalla.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14976499261","text":"from flask import Flask, jsonify, make_response, request, abort\nimport requests\n\nimport sys\nimport os\n\nconf_dir_default = os.path.expanduser('/Users/fabianbaier/Documents/Stuff/Python/CCM/config')\ncrtfilename = 'client.crt'\nkeyfilename = 'client.key'\ncrt = os.path.join(conf_dir_default, crtfilename)\nkey = os.path.join(conf_dir_default, keyfilename)\n\napp = Flask(__name__)\n\ntasks = [\n {\n 'id': 1,\n 'title': u'Buy groceries',\n 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',\n 'done': False\n },\n {\n 'id': 2,\n 'title': u'Learn Python',\n 'description': u'Alex you are cool!',\n 'done': False\n 
}\n]\n\n\n@app.route('/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n    return jsonify({'tasks': tasks})\n\n@app.route('/v1.0/containers', methods=['GET'])\ndef get_containers():\n    lxdapi = requests.get('https://10.0.49.207:8443/1.0/containers', verify=False, cert=(crt, key))\n    lxdapi = lxdapi.json()\n    container_list = []\n    containers = [{'container-list': container_list}]\n    for i in range(len(lxdapi['metadata'])):\n        container_list.append(lxdapi['metadata'][i].replace('/1.0/containers/',''))\n    return jsonify(containers)\n\n@app.route('/v1.0/containers/raw', methods=['GET'])\ndef get_containers_rawdata():\n    lxdapi = requests.get('https://10.0.49.207:8443/1.0/containers', verify=False, cert=(crt, key))\n    lxdapi = lxdapi.json()\n    containers_rawdata = [{'container-rawdata': lxdapi}]\n    return jsonify(containers_rawdata)\n\n@app.route('/v1.0/containers/raw/<task_id>', methods=['GET'])\ndef get_rawdata_container(task_id):\n    lxdapi = requests.get('https://10.0.49.207:8443/1.0/containers/'+task_id+'/state', verify=False, cert=(crt, key))\n    lxdapi = lxdapi.json()\n    containers_rawdata = [{'container-rawdata': lxdapi}]\n    return jsonify(containers_rawdata)\n\n@app.route('/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n    task = [task for task in tasks if task['id'] == task_id]\n    if len(task) == 0:\n        abort(404)\n    return jsonify({'task': task[0]})\n\n@app.route('/v1.0/tasks', methods=['POST'])\ndef create_task():\n    if not request.json or 'title' not in request.json:\n        abort(400)\n    task = {\n        'id': tasks[-1]['id'] + 1,\n        'title': request.json['title'],\n        'description': request.json.get('description', \"\"),\n        'done': False\n    }\n    tasks.append(task)\n    return jsonify({'task': task}), 201\n\n@app.errorhandler(404)\ndef not_found(error):\n    return make_response(jsonify({'error': 'Not found'}), 404)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n    #app.run()\n","repo_name":"fabianbaier/olycloud","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3357913128","text":"while True:\r\n    genero = input(\"Enter your gender ('f' for female, 'm' for male): \").lower()\r\n\r\n    if genero == 'f':\r\n        print(\"You will vote at a women's table.\")\r\n    elif genero == 'm':\r\n        print(\"You will vote at a men's table.\")\r\n    else:\r\n        print(\"Invalid gender. Please enter 'f' or 'm'.\")\r\n\r\n    opcion = input(\"Do you want to check again? 
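A side note on the parameterized routes in the api.py record above: Flask URL converters pass matched path segments straight into the view's arguments. A minimal, self-contained sketch (the route and names here are illustrative, not taken from the record):

```python
from flask import Flask, jsonify

demo = Flask(__name__)

@demo.route('/items/<int:item_id>')  # <int:...> converts the matched segment to an int
def get_item(item_id):
    # item_id arrives as an int, so numeric comparisons work without casting
    return jsonify({'id': item_id, 'squared': item_id ** 2})

# demo.test_client().get('/items/7').json -> {'id': 7, 'squared': 49}
```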
(y/n): \").lower()\r\n    if opcion != 'y':\r\n        break\r\n","repo_name":"CeleTru/ispcDIA","sub_path":"Ejercicio estructura condicional simple/condicionalSimple3.py","file_name":"condicionalSimple3.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37095583780","text":"import aputils\nimport asyncio\nimport base64\nimport json\nimport logging\nimport socket\nimport traceback\nimport uuid\n\nfrom aiohttp.hdrs import METH_ALL as METHODS\nfrom aiohttp.web import Response as AiohttpResponse, View as AiohttpView\nfrom datetime import datetime\nfrom json.decoder import JSONDecodeError\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\n\n\napp = None\n\nMIMETYPES = {\n\t'activity': 'application/activity+json',\n\t'html': 'text/html',\n\t'json': 'application/json',\n\t'text': 'text/plain'\n}\n\nNODEINFO_NS = {\n\t'20': 'http://nodeinfo.diaspora.software/ns/schema/2.0',\n\t'21': 'http://nodeinfo.diaspora.software/ns/schema/2.1'\n}\n\n\ndef set_app(new_app):\n\tglobal app\n\tapp = new_app\n\n\ndef boolean(value):\n\tif isinstance(value, str):\n\t\tif value.lower() in ['on', 'y', 'yes', 'true', 'enable', 'enabled', '1']:\n\t\t\treturn True\n\n\t\telif value.lower() in ['off', 'n', 'no', 'false', 'disable', 'disabled', '0']:\n\t\t\treturn False\n\n\t\telse:\n\t\t\traise TypeError(f'Cannot parse string \"{value}\" as a boolean')\n\n\telif isinstance(value, int):\n\t\tif value == 1:\n\t\t\treturn True\n\n\t\telif value == 0:\n\t\t\treturn False\n\n\t\telse:\n\t\t\traise ValueError('Integer value must be 1 or 0')\n\n\telif value is None:\n\t\treturn False\n\n\ttry:\n\t\treturn value.__bool__()\n\n\texcept AttributeError:\n\t\traise TypeError(f'Cannot convert object of type \"{clsname(value)}\"')\n\n\ndef check_open_port(host, port):\n\tif host == '0.0.0.0':\n\t\thost = '127.0.0.1'\n\n\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\t\ttry:\n\t\t\treturn s.connect_ex((host , port)) != 0\n\n\t\texcept socket.error as e:\n\t\t\treturn False\n\n\nclass DotDict(dict):\n\tdef __init__(self, _data, **kwargs):\n\t\tdict.__init__(self)\n\n\t\tself.update(_data, **kwargs)\n\n\n\tdef __getattr__(self, k):\n\t\ttry:\n\t\t\treturn self[k]\n\n\t\texcept KeyError:\n\t\t\traise AttributeError(f'{self.__class__.__name__} object has no attribute {k}') from None\n\n\n\tdef __setattr__(self, k, v):\n\t\tif k.startswith('_'):\n\t\t\tsuper().__setattr__(k, v)\n\n\t\telse:\n\t\t\tself[k] = v\n\n\n\tdef __setitem__(self, k, v):\n\t\tif type(v) == dict:\n\t\t\tv = DotDict(v)\n\n\t\tsuper().__setitem__(k, v)\n\n\n\tdef __delattr__(self, k):\n\t\ttry:\n\t\t\tdict.__delitem__(self, k)\n\n\t\texcept KeyError:\n\t\t\traise AttributeError(f'{self.__class__.__name__} object has no attribute {k}') from None\n\n\n\t@classmethod\n\tdef new_from_json(cls, data):\n\t\tif not data:\n\t\t\traise JSONDecodeError('Empty body', data, 1)\n\n\t\ttry:\n\t\t\treturn cls(json.loads(data))\n\n\t\texcept ValueError:\n\t\t\traise JSONDecodeError('Invalid body', data, 1)\n\n\n\t@classmethod\n\tdef new_from_signature(cls, sig):\n\t\tdata = cls({})\n\n\t\tfor chunk in sig.strip().split(','):\n\t\t\tkey, value = chunk.split('=', 1)\n\t\t\tvalue = value.strip('\\\"')\n\n\t\t\tif key == 'headers':\n\t\t\t\tvalue = value.split()\n\n\t\t\tdata[key.lower()] = value\n\n\t\treturn data\n\n\n\tdef to_json(self, indent=None):\n\t\treturn json.dumps(self, indent=indent)\n\n\n\tdef update(self, _data, **kwargs):\n\t\tif isinstance(_data, 
dict):\n\t\t\tfor key, value in _data.items():\n\t\t\t\tself[key] = value\n\n\t\telif isinstance(_data, (list, tuple, set)):\n\t\t\tfor key, value in _data:\n\t\t\t\tself[key] = value\n\n\t\tfor key, value in kwargs.items():\n\t\t\tself[key] = value\n\n\nclass Message(DotDict):\n\t@classmethod\n\tdef new_actor(cls, host, pubkey, description=None):\n\t\treturn cls({\n\t\t\t'@context': 'https://www.w3.org/ns/activitystreams',\n\t\t\t'id': f'https://{host}/actor',\n\t\t\t'type': 'Application',\n\t\t\t'preferredUsername': 'relay',\n\t\t\t'name': 'ActivityRelay',\n\t\t\t'summary': description or 'ActivityRelay bot',\n\t\t\t'followers': f'https://{host}/followers',\n\t\t\t'following': f'https://{host}/following',\n\t\t\t'inbox': f'https://{host}/inbox',\n\t\t\t'url': f'https://{host}/inbox',\n\t\t\t'endpoints': {\n\t\t\t\t'sharedInbox': f'https://{host}/inbox'\n\t\t\t},\n\t\t\t'publicKey': {\n\t\t\t\t'id': f'https://{host}/actor#main-key',\n\t\t\t\t'owner': f'https://{host}/actor',\n\t\t\t\t'publicKeyPem': pubkey\n\t\t\t}\n\t\t})\n\n\n\t@classmethod\n\tdef new_announce(cls, host, object):\n\t\treturn cls({\n\t\t\t'@context': 'https://www.w3.org/ns/activitystreams',\n\t\t\t'id': f'https://{host}/activities/{uuid.uuid4()}',\n\t\t\t'type': 'Announce',\n\t\t\t'to': [f'https://{host}/followers'],\n\t\t\t'actor': f'https://{host}/actor',\n\t\t\t'object': object\n\t\t})\n\n\n\t@classmethod\n\tdef new_follow(cls, host, actor):\n\t\treturn cls({\n\t\t\t'@context': 'https://www.w3.org/ns/activitystreams',\n\t\t\t'type': 'Follow',\n\t\t\t'to': [actor],\n\t\t\t'object': actor,\n\t\t\t'id': f'https://{host}/activities/{uuid.uuid4()}',\n\t\t\t'actor': f'https://{host}/actor'\n\t\t})\n\n\n\t@classmethod\n\tdef new_unfollow(cls, host, actor, follow):\n\t\treturn cls({\n\t\t\t'@context': 'https://www.w3.org/ns/activitystreams',\n\t\t\t'id': f'https://{host}/activities/{uuid.uuid4()}',\n\t\t\t'type': 'Undo',\n\t\t\t'to': [actor],\n\t\t\t'actor': f'https://{host}/actor',\n\t\t\t'object': follow\n\t\t})\n\n\n\t@classmethod\n\tdef new_response(cls, host, actor, followid, accept):\n\t\treturn cls({\n\t\t\t'@context': 'https://www.w3.org/ns/activitystreams',\n\t\t\t'id': f'https://{host}/activities/{uuid.uuid4()}',\n\t\t\t'type': 'Accept' if accept else 'Reject',\n\t\t\t'to': [actor],\n\t\t\t'actor': f'https://{host}/actor',\n\t\t\t'object': {\n\t\t\t\t'id': followid,\n\t\t\t\t'type': 'Follow',\n\t\t\t\t'object': f'https://{host}/actor',\n\t\t\t\t'actor': actor\n\t\t\t}\n\t\t})\n\n\n\t# misc properties\n\t@property\n\tdef domain(self):\n\t\treturn urlparse(self.id).hostname\n\n\n\t# actor properties\n\t@property\n\tdef shared_inbox(self):\n\t\treturn self.get('endpoints', {}).get('sharedInbox', self.inbox)\n\n\n\t# activity properties\n\t@property\n\tdef actorid(self):\n\t\tif isinstance(self.actor, dict):\n\t\t\treturn self.actor.id\n\n\t\treturn self.actor\n\n\n\t@property\n\tdef objectid(self):\n\t\tif isinstance(self.object, dict):\n\t\t\treturn self.object.id\n\n\t\treturn self.object\n\n\n\t@property\n\tdef signer(self):\n\t\treturn aputils.Signer.new_from_actor(self)\n\n\nclass Response(AiohttpResponse):\n\t@classmethod\n\tdef new(cls, body='', status=200, headers=None, ctype='text'):\n\t\tkwargs = {\n\t\t\t'status': status,\n\t\t\t'headers': headers,\n\t\t\t'content_type': MIMETYPES[ctype]\n\t\t}\n\n\t\tif isinstance(body, bytes):\n\t\t\tkwargs['body'] = body\n\n\t\telif isinstance(body, dict) and ctype in {'json', 'activity'}:\n\t\t\tkwargs['text'] = json.dumps(body)\n\n\t\telse:\n\t\t\tkwargs['text'] = 
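For context on `DotDict.new_from_signature` in the relay record above: it splits a raw HTTP Signature header on commas and on the first `=`, lower-cases the keys, and turns the `headers` field into a list. A minimal standalone illustration with a made-up header value:

```python
# Stand-alone rerun of the parsing logic from new_from_signature above.
sig = 'keyId="https://example.com/actor#main-key",algorithm="rsa-sha256",headers="(request-target) host date"'

parsed = {}
for chunk in sig.strip().split(','):
    key, value = chunk.split('=', 1)
    value = value.strip('"')
    if key == 'headers':
        value = value.split()
    parsed[key.lower()] = value

print(parsed['keyid'])    # https://example.com/actor#main-key
print(parsed['headers'])  # ['(request-target)', 'host', 'date']
```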
body\n\n\t\treturn cls(**kwargs)\n\n\n\t@classmethod\n\tdef new_error(cls, status, body, ctype='text'):\n\t\tif ctype == 'json':\n\t\t\tbody = json.dumps({'status': status, 'error': body})\n\n\t\treturn cls.new(body=body, status=status, ctype=ctype)\n\n\n\t@property\n\tdef location(self):\n\t\treturn self.headers.get('Location')\n\n\n\t@location.setter\n\tdef location(self, value):\n\t\tself.headers['Location'] = value\n\n\nclass View(AiohttpView):\n\tasync def _iter(self):\n\t\tif self.request.method not in METHODS:\n\t\t\tself._raise_allowed_methods()\n\n\t\tmethod = getattr(self, self.request.method.lower(), None)\n\n\t\tif method is None:\n\t\t\tself._raise_allowed_methods()\n\n\t\treturn await method(**self.request.match_info)\n\n\n\t@property\n\tdef app(self):\n\t\treturn self._request.app\n\n\n\t@property\n\tdef config(self):\n\t\treturn self.app.config\n\n\n\t@property\n\tdef database(self):\n\t\treturn self.app.database\n","repo_name":"atsu1125/relay","sub_path":"relay/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"70748913864","text":"import functools\nimport typing as T\n\nfrom dpipes.pipeline import Pipeline, make_partials\n\n\nclass PipeProcessor(Pipeline):\n \"\"\"\n Class to sequentially process an arbitrary number of pandas.DataFrame.pipe functions.\n \"\"\"\n\n def __call__(self, df):\n if self.kwargs:\n self.funcs = make_partials(self.funcs, self.kwargs)\n return functools.reduce(lambda _df, trans: _df.pipe(trans), self.funcs, df)\n\n\nclass ColumnPipeProcessor(PipeProcessor):\n \"\"\"\n Class to sequentially process an arbitrary number of pandas.DataFrame.pipe functions by column.\n \"\"\"\n\n def __init__(\n self,\n funcs: T.Sequence[T.Callable],\n cols: T.Optional[T.Union[str, T.Sequence[T.Union[str, T.Sequence[str]]]]],\n ):\n \"\"\"\n Instantiate processor.\n\n Parameters\n ----------\n funcs: Sequence[Callable]\n An iterable collection of user-defined functions. Function signatures should match\n `func(df, cols)`, where `df` is a pandas.DataFrame and `cols` is an optional list of\n columns to apply functions to.\n cols: Optional[Union[str, Sequence[Union[str, Sequence[str]]]\n An iterable collection of columns to apply respective functions to. 
If a single string\n            or single list of strings is passed they will be broadcast across the sequence of\n            functions.\n\n        Returns\n        -------\n        pd.DataFrame\n            A processed DataFrame.\n        \"\"\"\n        super().__init__(funcs)\n        if cols:\n            # broadcast single string or single list\n            if isinstance(cols, str) or (\n                isinstance(cols, T.Sequence) and all(isinstance(x, str) for x in cols)\n            ):\n                self.funcs = [functools.partial(f, cols=cols) for f in funcs]\n\n            else:\n                self._check_args(funcs, cols)\n                self.funcs = [\n                    functools.partial(f, cols=c) if c else f\n                    for f, c in zip(funcs, cols)\n                ]\n\n        else:  # apply funcs to entire dataframe\n            self.funcs = funcs\n","repo_name":"chris-santiago/dpipes","sub_path":"dpipes/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20226343439","text":"# Prim's algorithm\n# Used to find the minimum spanning tree of a connected, undirected graph with weighted edges\n# O(n*m) for naive implementation; O(m log n) for heap implementation\n\n# Each edge will be in the format [head_node, tail_node, edge_weight]\nfile = open('./prim.txt', 'r')\nallEdges = []\nvisitedNodes = {}\nnumberOfNodes = 0\nnumberOfEdges = 0\n\nfor line in file:\n    edge = [int(i) for i in line.split()]\n\n    if len(edge) > 2:\n        allEdges.append(edge)\n\n    else:\n        numberOfNodes = edge[0]\n        numberOfEdges = edge[1]\n\n# Pick an arbitrary node to start\nstartNode = allEdges[0][0]\nvisitedNodes[startNode] = 1\ntotalMSTWeight = 0 # We need to calculate the sum of the weights of all the edges in the MST\n\n# While there are still unvisited nodes\nwhile len(visitedNodes.keys()) != numberOfNodes:\n    # out of all of the edges where one node is in X, the other is not, pick the one with smallest weight\n    smallestEdge = [0,0,100000000000]\n\n    for edge in allEdges:\n        if edge[0] in visitedNodes and edge[1] not in visitedNodes:\n            if edge[2] < smallestEdge[2]:\n                smallestEdge = edge\n        elif edge[1] in visitedNodes and edge[0] not in visitedNodes:\n            if edge[2] < smallestEdge[2]:\n                smallestEdge = edge\n\n    # add this chosen edge to T\n    totalMSTWeight += smallestEdge[2]\n    # add the not yet seen node of this edge to X\n    visitedNodes[smallestEdge[0]] = 1\n    visitedNodes[smallestEdge[1]] = 1\n\nprint(totalMSTWeight)\n","repo_name":"Mel0nHead/algorithms","sub_path":"greedy-algorithms/prims_algorithm.py","file_name":"prims_algorithm.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42968994161","text":"import regex as re\nfrom functools import lru_cache\nfrom typing import Any, Dict, TextIO\n\nINPUT = \"input\"\n\n\ndef read_rules(fin: TextIO) -> Dict[int, str]:\n    result = {}\n    for line in fin:\n        if not line.strip():\n            break\n        rule_num, rule_str = line.strip().split(\": \")\n        result[int(rule_num)] = rule_str\n    return result\n\n\ndef parse_rule(rule: str, rules: Dict[int, str], part2: bool) -> Any:\n    @lru_cache(None)\n    def parse_rule_helper(rule: str) -> str:\n        if part2:\n            if rule == \"8\":\n                return f\"{parse_rule_helper('42')}+\"\n            if rule == \"11\":\n                return f\"(?P<name>{parse_rule_helper('42')}(?&name)?{parse_rule_helper('31')})\"\n\n        if rule.startswith('\"'):\n            return rule.strip('\"')\n        if \"|\" in rule:\n            left_rule, right_rule = rule.split(\"|\")\n            return f\"({parse_rule_helper(left_rule.strip())}|{parse_rule_helper(right_rule.strip())})\"\n        if \" \" in rule:\n            return \"\".join(parse_rule_helper(x.strip()) 
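The Prim's record above notes that a heap brings the naive O(n*m) scan down to O(m log n). A minimal heap-based sketch of the same algorithm, using the same [head_node, tail_node, edge_weight] edge format (standalone, not part of the record):

```python
import heapq
from collections import defaultdict

def prim_mst_weight(edges):
    # Build an adjacency list from [head, tail, weight] edges.
    adj = defaultdict(list)
    for u, v, w in edges:
        adj[u].append((w, v))
        adj[v].append((w, u))

    start = edges[0][0]
    visited = {start}
    heap = list(adj[start])
    heapq.heapify(heap)
    total = 0

    while heap and len(visited) < len(adj):
        w, v = heapq.heappop(heap)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        total += w
        for edge in adj[v]:
            heapq.heappush(heap, edge)
    return total

print(prim_mst_weight([[1, 2, 1], [2, 3, 2], [1, 3, 4]]))  # 3
```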
for x in rule.split(\" \"))\n return parse_rule_helper(rules[int(rule)])\n\n return re.compile(parse_rule_helper(rule))\n\n\ndef main() -> None:\n with open(INPUT, \"r\") as fin:\n rules = read_rules(fin)\n\n p1 = parse_rule(rules[0], rules, False)\n p2 = parse_rule(rules[0], rules, True)\n p1_total = 0\n p2_total = 0\n for line in fin:\n if p1.fullmatch(line.strip()):\n p1_total += 1\n if p2.fullmatch(line.strip()):\n p2_total += 1\n\n print(p1_total)\n print(p2_total)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NickG123/AdventOfCode2020","sub_path":"Day 19/day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18012686030","text":"from envconnect import RemoteGremlin\nimport os\n\nrg = RemoteGremlin(\"172.17.0.2\")\nrg.open()\n\n\ndef save_graph():\n graphmlPath = \"data/A-Fish-Named-Wanda.xml\"\n g = rg.g\n # drop the existing content of the graph\n g.V().drop().iterate()\n g.addV(\"Fish\").property(\"name\", \"Wanda\").iterate()\n g.io(graphmlPath).write().iterate()\n print(\"wrote graph to %s\" % (graphmlPath))\n # check that the graphml file exists\n assert os.path.isfile(graphmlPath)\n\n\nif __name__ == \"__main__\":\n save_graph()\n","repo_name":"sergio12S/graph","sub_path":"saveData.py","file_name":"saveData.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18496512037","text":"import os\nimport torch\nimport numpy as np\nimport imageio \nimport json\nimport torch.nn.functional as F\nimport cv2\n\n\ntrans_t = lambda t : torch.Tensor([\n [1,0,0,0],\n [0,1,0,0],\n [0,0,1,t],\n [0,0,0,1]]).float()\n\nrot_phi = lambda phi : torch.Tensor([\n [1,0,0,0],\n [0,np.cos(phi),-np.sin(phi),0],\n [0,np.sin(phi), np.cos(phi),0],\n [0,0,0,1]]).float()\n\nrot_theta = lambda th : torch.Tensor([\n [np.cos(th),0,-np.sin(th),0],\n [0,1,0,0],\n [np.sin(th),0, np.cos(th),0],\n [0,0,0,1]]).float()\n\n\ndef pose_spherical(theta, phi, radius):\n c2w = trans_t(radius)\n c2w = rot_phi(phi/180.*np.pi) @ c2w\n c2w = rot_theta(theta/180.*np.pi) @ c2w\n c2w = torch.Tensor(np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]])) @ c2w\n return c2w\n\ndef read_files(rgb_file, downsample_scale=None):\n # fname = os.path.join(basedir, rgb_file)\n fname = rgb_file\n img = cv2.imread(fname, cv2.IMREAD_UNCHANGED)\n\n if downsample_scale is not None:\n img = cv2.resize(img, (int(img.shape[1]/downsample_scale), int(img.shape[0]/downsample_scale)), interpolation=cv2.INTER_LINEAR)\n\n if img.shape[-1] == 4:\n convert_fn = cv2.COLOR_BGRA2RGBA\n else:\n convert_fn = cv2.COLOR_BGR2RGB\n img = (cv2.cvtColor(img, convert_fn) / 255.).astype(np.float32) # keep 4 channels (RGBA) if available\n\n return img\n\n# def load_ground_truth_depth(depth_file, depth_scaling_factor, near, far):\n# gt_depth = cv2.imread(depth_file, cv2.IMREAD_UNCHANGED).astype(np.float64)[...,0]\n# gt_depth = (gt_depth / depth_scaling_factor).astype(np.float32)\n\n# return gt_depth\n\ndef load_ground_truth_depth(depth_file, depth_scaling_factor, near, far):\n gt_depth = cv2.imread(depth_file, cv2.IMREAD_UNCHANGED).astype(np.float64)\n gt_depth = (gt_depth / depth_scaling_factor).astype(np.float32)\n\n return gt_depth\n\ndef load_blender_data(basedir, half_res=False, testskip=1):\n splits = ['train', 'val', 'test']\n metas = {}\n for s in splits:\n with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') 
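The `(?P<name>...(?&name)?...)` construction in the day19 record above relies on the third-party `regex` module, whose recursive subpattern calls the stdlib `re` does not support. A tiny standalone demo of the same feature:

```python
import regex

# Match strings of the form a^n b^n using a recursive named group.
balanced = regex.compile(r"(?P<ab>a(?&ab)?b)")
print(bool(balanced.fullmatch("aaabbb")))  # True
print(bool(balanced.fullmatch("aaabb")))   # False
```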
as fp:\n metas[s] = json.load(fp)\n\n all_imgs = []\n all_poses = []\n counts = [0]\n for s in splits:\n meta = metas[s]\n imgs = []\n poses = []\n if s=='train' or testskip==0:\n skip = 1\n else:\n skip = testskip\n \n for frame in meta['frames'][::skip]:\n fname = os.path.join(basedir, frame['file_path'] + '.png')\n imgs.append(imageio.imread(fname))\n poses.append(np.array(frame['transform_matrix']))\n imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)\n poses = np.array(poses).astype(np.float32)\n counts.append(counts[-1] + imgs.shape[0])\n all_imgs.append(imgs)\n all_poses.append(poses)\n \n i_split = [np.arange(counts[i], counts[i+1]) for i in range(3)]\n \n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n \n H, W = imgs[0].shape[:2]\n camera_angle_x = float(meta['camera_angle_x'])\n focal = .5 * W / np.tan(.5 * camera_angle_x)\n \n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n \n if half_res:\n H = H//2\n W = W//2\n focal = focal/2.\n\n imgs_half_res = np.zeros((imgs.shape[0], H, W, 4))\n for i, img in enumerate(imgs):\n imgs_half_res[i] = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)\n imgs = imgs_half_res\n # imgs = tf.image.resize_area(imgs, [400, 400]).numpy()\n\n \n return imgs, poses, render_poses, [H, W, focal], i_split\n\n### For fixed dist test time data\ndef load_scene_blender_fixed_dist_new(basedir, half_res=True, train_dist=1.0, test_dist=1.0, val_dist=1.0):\n splits = ['train', 'val', 'test']\n\n all_imgs = []\n\n all_poses = []\n all_intrinsics = []\n counts = [0]\n filenames = []\n\n for s in splits:\n\n if s == \"train\":\n folder = 'radius_{}_{}'.format(str(train_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(train_dist), s)\n elif s == \"val\":\n folder = 'radius_{}_{}'.format(str(val_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(val_dist), s) \n elif s == \"test\":\n folder = 'radius_{}_{}'.format(str(test_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(test_dist), s) \n else:\n ## dummy will return not exist\n transforms_file = \"blah\"\n\n if os.path.exists(os.path.join(basedir, transforms_file)):\n\n json_fname = os.path.join(basedir, transforms_file)\n\n with open(json_fname, 'r') as fp:\n meta = json.load(fp)\n\n # if 'train' in s:\n near = 2.\n far = 6.\n camera_angle_x = float(meta['camera_angle_x'])\n\n imgs = []\n poses = []\n intrinsics = []\n\n if s=='train':\n skip = 1\n elif s == \"val\":\n skip = 1\n elif s ==\"test\":\n skip = 4\n elif \"video\" in s:\n skip = 1\n \n for frame in meta['frames'][::skip]:\n if len(frame['file_path']) != 0 :\n if half_res :\n downsample = 2\n else:\n downsample = 1\n\n img = read_files(os.path.join(basedir, frame['file_path']+\".png\"), downsample_scale=downsample)\n\n filenames.append(frame['file_path'])\n imgs.append(img)\n\n # poses.append(np.array(frame['transform_matrix'])@ BLENDER2OPENCV)\n poses.append(np.array(frame['transform_matrix']))\n\n H, W = img.shape[:2]\n focal = .5 * W / np.tan(.5 * camera_angle_x) \n\n fx, fy, cx, cy = focal, focal, W/2.0, H/2.0\n intrinsics.append(np.array((fx, fy, cx, cy)))\n\n counts.append(counts[-1] + len(poses))\n if len(imgs) > 0:\n all_imgs.append(np.array(imgs))\n all_poses.append(np.array(poses).astype(np.float32))\n all_intrinsics.append(np.array(intrinsics).astype(np.float32))\n\n else:\n counts.append(counts[-1])\n\n render_poses = torch.stack([pose_spherical(angle, 
-30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))]\n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n intrinsics = np.concatenate(all_intrinsics, 0)\n \n return imgs, poses, render_poses, [H, W, focal], i_split\n\n\ndef load_scene_blender2(basedir, train_json = \"transforms_train.json\", half_res=True):\n splits = ['train', 'val', 'test']\n # splits = ['test']\n\n all_imgs = []\n\n all_poses = []\n all_intrinsics = []\n counts = [0]\n filenames = []\n for s in splits:\n if os.path.exists(os.path.join(basedir, '{}_transforms.json'.format(s))):\n\n json_fname = os.path.join(basedir, '{}_transforms.json'.format(s))\n\n with open(json_fname, 'r') as fp:\n meta = json.load(fp)\n\n if 'train' in s:\n near = 2.\n far = 6.\n camera_angle_x = float(meta['camera_angle_x'])\n\n imgs = []\n poses = []\n intrinsics = []\n\n if s=='train':\n skip = 1\n elif s ==\"test\":\n skip = 8\n elif \"video\" in s:\n skip = 1\n \n for frame in meta['frames'][::skip]:\n if len(frame['file_path']) != 0 :\n if half_res :\n downsample = 2\n else:\n downsample = 1\n\n img = read_files(os.path.join(basedir, frame['file_path']+\".png\"), downsample_scale=downsample)\n\n filenames.append(frame['file_path'])\n imgs.append(img)\n\n # poses.append(np.array(frame['transform_matrix'])@ BLENDER2OPENCV)\n poses.append(np.array(frame['transform_matrix']))\n\n H, W = img.shape[:2]\n focal = .5 * W / np.tan(.5 * camera_angle_x) \n\n fx, fy, cx, cy = focal, focal, W/2.0, H/2.0\n intrinsics.append(np.array((fx, fy, cx, cy)))\n\n counts.append(counts[-1] + len(poses))\n if len(imgs) > 0:\n all_imgs.append(np.array(imgs))\n all_poses.append(np.array(poses).astype(np.float32))\n all_intrinsics.append(np.array(intrinsics).astype(np.float32))\n\n else:\n counts.append(counts[-1])\n\n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))]\n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n intrinsics = np.concatenate(all_intrinsics, 0)\n \n return imgs, poses, render_poses, [H, W, focal], i_split\n\n\ndef load_scene_blender2_depth(basedir, train_json = \"transforms_train.json\", half_res=True, train_skip=1, near_plane=2.0):\n splits = ['train', 'val', 'test']\n # splits = ['test']\n\n all_imgs = []\n all_depths = []\n all_valid_depths = []\n all_poses = []\n all_intrinsics = []\n counts = [0]\n filenames = []\n for s in splits:\n if os.path.exists(os.path.join(basedir, '{}_transforms.json'.format(s))):\n\n json_fname = os.path.join(basedir, '{}_transforms.json'.format(s))\n\n with open(json_fname, 'r') as fp:\n meta = json.load(fp)\n\n if 'train' in s:\n near = near_plane\n far = 6.\n camera_angle_x = float(meta['camera_angle_x'])\n\n imgs = []\n depths = []\n valid_depths = [] \n poses = []\n intrinsics = []\n\n if s=='train':\n skip = train_skip\n elif s ==\"test\":\n skip = 8\n elif \"video\" in s:\n skip = 1\n \n for frame in meta['frames'][::skip]:\n if len(frame['file_path']) != 0 :\n if half_res :\n downsample = 2\n else:\n downsample = 1\n\n img = read_files(os.path.join(basedir, frame['file_path']+\".png\"), downsample_scale=downsample)\n\n max_depth = frame[\"max_depth\"]\n depth_scaling_factor = (255. 
/ max_depth)\n\n # if \"chair\" in basedir:\n # depth = load_ground_truth_depth(os.path.join(basedir, frame['depth_file_path']+\"0000.png\"), depth_scaling_factor, near, far)\n # else:\n # depth = load_ground_truth_depth(os.path.join(basedir, frame['depth_file_path']+\"0001.png\"), depth_scaling_factor, near, far)\n\n depth = load_ground_truth_depth(os.path.join(basedir, frame['depth_file_path'][:-1]+\".png\"), depth_scaling_factor, near, far)\n\n if depth.ndim == 2:\n depth = np.expand_dims(depth, -1)\n\n valid_depth = np.logical_and(depth[:, :, 0] > near, depth[:, :, 0] < far) # 0 values are invalid depth\n\n depth = np.clip(depth, near, far)\n\n\n filenames.append(frame['file_path'])\n imgs.append(img)\n depths.append(depth)\n valid_depths.append(valid_depth)\n\n # poses.append(np.array(frame['transform_matrix'])@ BLENDER2OPENCV)\n poses.append(np.array(frame['transform_matrix']))\n\n H, W = img.shape[:2]\n focal = .5 * W / np.tan(.5 * camera_angle_x) \n\n fx, fy, cx, cy = focal, focal, W/2.0, H/2.0\n intrinsics.append(np.array((fx, fy, cx, cy)))\n\n counts.append(counts[-1] + len(poses))\n if len(imgs) > 0:\n all_imgs.append(np.array(imgs))\n all_depths.append(np.array(depths))\n all_valid_depths.append(np.array(valid_depths))\n\n all_poses.append(np.array(poses).astype(np.float32))\n all_intrinsics.append(np.array(intrinsics).astype(np.float32))\n\n else:\n counts.append(counts[-1])\n\n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))]\n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n intrinsics = np.concatenate(all_intrinsics, 0)\n depths = np.concatenate(all_depths, 0)\n valid_depths = np.concatenate(all_valid_depths, 0)\n\n gt_depths = depths\n gt_valid_depths = valid_depths\n\n return imgs, depths, valid_depths, poses, [H, W, focal], near, far, i_split, gt_depths, gt_valid_depths, render_poses\n\n\n\n\n\n","repo_name":"mikacuy/PL-NeRF","sub_path":"load_blender.py","file_name":"load_blender.py","file_ext":"py","file_size_in_byte":13302,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"81"} +{"seq_id":"6339619333","text":"import mlflow\nimport pandas as pd\nimport shap\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom io import BytesIO\nimport base64\n\nlogged_model = 'file:///C:/Users/SURFACE/Documents/GitHub/Hackathon-equipe_5/lightgbm_500000_iterative_False_lightgbm'\n\ndata = pd.read_csv(r'..\\..\\df_petit.csv', parse_dates=[\"date\", \"items_first_enabled_date\"])\n# Load model as a PyFuncModel.\nloaded_model = mlflow.pyfunc.load_model(logged_model)\n\n# Predict on a Pandas DataFrame.\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom src.dash.app import app\nimport datetime as dt\nfrom dash.dependencies import Input, Output\n\nshap_layout = [\n dcc.Slider(id=\"slider-shap\",\n min=0,\n max=10,\n step=None,\n marks={store_id: str(store_id) for store_id in data.store_id.unique()},\n value=data.store_id.unique()[0]),\n\n\n dcc.DatePickerRange(\n id=\"date-range\",\n min_date_allowed=data.date.min(),\n max_date_allowed=data.date.max(),\n start_date=data.date.min(),\n end_date=data.date.min(),\n ),\n\n html.Img(id=\"shap\",\n height='auto',\n width='auto',\n )\n ]\n\n\n@app.callback(Output(\"shap\", \"src\"), [Input(\"startdate-input\", \"value\"), Input(\"slider-shap\", \"value\")])\ndef shap_predict(date_value, 
id_value):\n    current = data[(data[\"date\"].isin(date_value)) & (data[\"store_id\"] == id_value)]\n    explainer = shap.TreeExplainer(loaded_model)\n    shap_values = explainer.shap_values(current.drop([\"target\"], axis=1))\n    shap.plots.waterfall(shap_values[0])\n    fig = plt.gcf()\n    plt.close()\n    return fig_to_uri(in_fig=fig)\n\n\ndef fig_to_uri(in_fig, close_all=True, **save_args):\n    \"\"\"\n    Save a figure as a URI\n    :param in_fig:\n    :return:\n    \"\"\"\n    out_img = BytesIO()\n    in_fig.savefig(out_img, format='png', **save_args)\n    if close_all:\n        in_fig.clf()\n        plt.close('all')\n    out_img.seek(0) # rewind file\n    encoded = base64.b64encode(out_img.read()).decode(\"ascii\").replace(\"\\n\", \"\")\n    return \"data:image/png;base64,{}\".format(encoded)\n\n","repo_name":"Adrien-Mcode/Hackathon-equipe_5","sub_path":"src/dash/model_report/shap_values.py","file_name":"shap_values.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"12139686951","text":"'''\nModel-training part of the model module:\ntraining entry file: initialize the parameter server and the train/test metric objects, and define the model network (wiring inputs into the graph);\nwrite the train function (session.run) and the valid function;\nsave the model in the pb format required for online serving\n'''\n\nfrom ps_fn import PS \nfrom input_fn import InputFn \nfrom auc_fn import AUCUtils\n# from mmoe import setup_graph, config\n# from dcn import setup_graph, config\n# from deepfm import setup_graph, config\nfrom dnn import setup_graph, config\nfrom save_load_model import save_model_to_ckpt, save_model_to_pb, tensorboard_show_graph\nimport tensorflow as tf\nimport os\nprint()\n\nprint(tf.__version__)\nlocal_ps = PS(config['embedding_dim'])\ntrain_metric = AUCUtils()\ntest_metric = AUCUtils()\ninputs = InputFn(local_ps, config)\n\nmax_steps = config['max_steps']\ntrain_log_iter = config['train_log_iter']\ntest_show_iter = config['test_show_iter']\nlast_test_auc = config['last_test_auc']\n\ntrain_iter, train_inputs = inputs.input_fn(config['train_path'], is_test = False)\ntrain_dic = setup_graph(train_inputs, is_test = False)\ntest_iter, test_inputs = inputs.input_fn(config['test_path'], is_test = True)\ntest_dic = setup_graph(test_inputs, is_test = True)\n\ndef train():\n    _iter = 0\n    print('#' * 80)\n    saver = tf.train.Saver(max_to_keep=1)\n    with tf.Session() as sess:\n        sess.run([tf.global_variables_initializer(),\n            tf.local_variables_initializer()])\n        sess.run(train_iter.initializer)\n        while _iter < max_steps:\n            old_embedding, new_embedding, keys, out_, _ = sess.run([\n                train_dic['feature_embedding'],\n                train_dic['feature_new_embedding'],\n                train_dic['feature'],\n                train_dic['out'],\n                train_dic['train_op']\n            ])\n\n            train_metric.add(\n                out_['loss'],\n                out_['ground_truth'],\n                out_['prediction'])\n\n            local_ps.push(keys, new_embedding)\n            \n            _iter += 1\n            if _iter % train_log_iter == 0:\n                print('Train at step %d: %s' % (_iter, train_metric.calc_str()))\n                train_metric.reset()\n            if _iter % test_show_iter == 0:\n                valid_step(sess, test_iter, test_dic, saver, _iter)\n    \ndef valid_step(sess, test_iter, test_dic, saver, _iter):\n    test_metric.reset()\n    sess.run(test_iter.initializer)\n    global last_test_auc\n    while True:\n        try:\n            out = sess.run(test_dic['out'])\n            test_metric.add(\n                out['loss'],\n                out['ground_truth'],\n                out['prediction']\n            )\n        except tf.errors.OutOfRangeError:\n            print('Test at step %d: %s' % (_iter, test_metric.calc_str()))\n            if test_metric.calc()['auc'] > last_test_auc:\n                save_model_to_ckpt(\n                    sess, saver, config['saved_checkpoint'],config['checkpoint_name'], _iter\n                )\n                last_test_auc = test_metric.calc()['auc'] \n                
local_ps.save(config['saved_embedding'])\n break\n\ndef save_pb():\n input_tensor = config['input_tensor']\n output_tensor = config['output_tensor']\n model_path_dir = config['saved_checkpoint']\n export_path_model = config['saved_pd']\n save_model_to_pb(model_path_dir, export_path_model, input_tensor, output_tensor)\n\n\nif __name__ == '__main__':\n train()\n save_pb()\n # tensorboard_show_graph(config['saved_pd'])\n # tensorboard --logdir log\n # http://localhost:6006\n\n\n ","repo_name":"liumeijun-erin/rec-sort_MMOE_deepfm_DCN","sub_path":"model/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"25619099416","text":"# Temperature box register address\nRUN_SWITCH_ADRESS = 30\nTEMPERATURE_MEASUREMENT_ADRESS = 0\nHUMIDITY_MEASUREMENT_ADRESS = 10\nTEMPERATURE_SET_VALUE_CURRENT_ADRESS = 2\nHUMIDITY_SETVALUE_CURRENT_ADRESS = 12\nTEMPERATURE_SET_VALUE_FINAL_ADRESS = 1\nHUMIDITY_SET_VALUEF_INAL_ADRESS = 11\nTEMPERATURE_SLOPE_ADRESS = 51\n# HUMIDITY_SLOPE_ADRESS = 33\n\n# Temperature box control\nSWITCH_ON = 1\nSWITCH_OFF = 0\n\n# Temperature limit\nHIGH_TEMPERATURE = 90\nLOW_TEMPERATURE = -50\nHIGHT_SLOPE = 10\nLOW_SLOPE = 0","repo_name":"Maxibing/termerpature_box_demo","sub_path":"global_value.py","file_name":"global_value.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40014986892","text":"\"\"\"\n Name: Joseph Kabuika\n Course: ICT 4370: Python Programming\n Term: Summer 2022\n Date: August 21, 2022\n Assignment: WEEK 10 Portfolio Assignment\n\n Notes:\n - This code requires the Data_Bonds.csv and Data_Stocks.csv files (\n provided in the assignment instructions) to be in the same repository in order to run\n successfully.\n - Requires AllStocks.json file to run.\n - After running successfully, the code will generate a report file named\n {investor_name}_investment_report.txt\n - The Code will generate the data char file named line_chart.svg\n\n 🚨 🚨 🚨\n If ModuleNotFoundError: No module named '_tkinter' => Make sure you install the python-tk or\n python3-tk package\n\n # Make sure to specify correct Python version.\n # For example, if you run Python v3.9 run adjust command to\n brew install python-tk@3.9\n\n If you are on Windows, you have to make sure to check the optiontcl/tk and IDLE when installing Python.\n If you already installed Python, download the installer, run it and click Modify. 
Then check the tcl/tk and IDLE checkbox to install tkinter for your Python version.\n\n\"\"\"\nimport datetime\nimport json\nimport uuid\nimport csv\nimport sys\nfrom tkinter.ttk import Treeview\n\nimport pygal\nfrom tabulate import tabulate\n\nfrom tkinter import *\n\nfrom svglib.svglib import svg2rlg\nfrom reportlab.graphics import renderPM\n\nfrom PIL import ImageTk, Image as PIL_image\nfrom io import BytesIO\n\ndate_format = '%m/%d/%Y'\nbond_data = []\nstock_data = []\nroot = Tk()\n\nwidth = root.winfo_screenwidth()\nheight = root.winfo_screenheight()\nroot.geometry(\"%dx%d\" % (width, height))\nroot.title(\"Welcome to the Portfolio Visualization App!\")\n\n\ndef show_graph():\n    print('Displaying graph...')\n    file = open_file(\"line_chart.svg\")\n    svg_content = file.read()\n    file.close()\n\n    drawing = svg2rlg(path=BytesIO(bytes(svg_content, 'utf-8')))\n    out = BytesIO()\n    renderPM.drawToFile(drawing, out, fmt=\"PNG\")\n\n    img = PIL_image.open(out)\n    pimg = ImageTk.PhotoImage(img)\n    size = img.size\n\n    frame = Canvas(root, width=size[0], height=size[1])\n\n    frame.grid(column=0, row=3, sticky=W, padx=5, pady=5)\n    frame.create_image(0, 0, anchor='nw', image=pimg)\n\n\ndef show_report():\n    print('Displaying report data...')\n    text = Label(root, font=('Helvetica', 12), justify=CENTER, text=report)\n    # text.place(x=70, y=90)\n    text.grid(column=1, row=3, sticky=W, padx=5, pady=5)\n\n\ndef show_bonds_data():\n    print('Displaying bonds data...')\n\n    bond_reporting_frame = Frame(root)\n    bond_reporting_frame.grid(column=0, row=5, sticky=W, padx=5, pady=5)\n    bond_report_data = Treeview(root)\n\n    bond_report_data['columns'] = (\n        'PurchaseID', 'Symbol', 'Purchase_Price', 'Current_Price', 'Quantity', 'Coupon',\n        'Yield',\n        'Purchase_Date')\n\n    bond_report_data.column(\"#0\", width=0, stretch=NO)\n    bond_report_data.column(\"PurchaseID\", anchor=CENTER, width=320)\n    bond_report_data.column(\"Symbol\", anchor=CENTER, width=80)\n    bond_report_data.column(\"Purchase_Price\", anchor=CENTER, width=80)\n    bond_report_data.column(\"Current_Price\", anchor=CENTER, width=80)\n    bond_report_data.column(\"Quantity\", anchor=CENTER, width=80)\n    bond_report_data.column(\"Coupon\", anchor=CENTER, width=80)\n    bond_report_data.column(\"Yield\", anchor=CENTER, width=80)\n    bond_report_data.column(\"Purchase_Date\", anchor=CENTER, width=80)\n\n    bond_report_data.heading(\"PurchaseID\", text=\"Bond Id\", anchor=CENTER)\n    bond_report_data.heading(\"Symbol\", text=\"Symbol\", anchor=CENTER)\n    bond_report_data.heading(\"Purchase_Price\", text=\"Purchase Price\", anchor=CENTER)\n    bond_report_data.heading(\"Current_Price\", text=\"Current Price\", anchor=CENTER)\n    bond_report_data.heading(\"Quantity\", text=\"Quantity\", anchor=CENTER)\n    bond_report_data.heading(\"Coupon\", text=\"Coupon\", anchor=CENTER)\n    bond_report_data.heading(\"Yield\", text=\"Yield\", anchor=CENTER)\n    bond_report_data.heading(\"Purchase_Date\", text=\"Purchase Date\", anchor=CENTER)\n\n    for bond in bond_data:\n        bond_report_data.insert(parent='', index='end',\n                                values=(\n                                    bond[0], bond[1], bond[2], bond[3], bond[4], bond[5], bond[6],\n                                    bond[7]))\n\n    bond_report_data.grid(column=0, row=5, sticky=W, padx=5, pady=5)\n\n\n    print('Displaying stock data...')\n\n    reporting_frame = Frame(root)\n    # game_frame.pack()\n    reporting_frame.grid(column=0, row=4, sticky=W, padx=5, pady=5)\n    report_data = Treeview(reporting_frame)\n\n    report_data['columns'] = (\n        'PurchaseID', 'Symbol', 'Share_Count', 'Earnings_Loss', 'Yearly_Earning_Loss',\n        'Purchase_Date')\n\n    
report_data.column(\"#0\", width=0, stretch=NO)\n report_data.column(\"PurchaseID\", anchor=CENTER, width=320)\n report_data.column(\"Symbol\", anchor=CENTER, width=160)\n report_data.column(\"Share_Count\", anchor=CENTER, width=160)\n report_data.column(\"Earnings_Loss\", anchor=CENTER, width=160)\n report_data.column(\"Yearly_Earning_Loss\", anchor=CENTER, width=160)\n report_data.column(\"Purchase_Date\", anchor=CENTER, width=160)\n\n report_data.heading(\"PurchaseID\", text=\"Stock Id\", anchor=CENTER)\n report_data.heading(\"Symbol\", text=\"Symbol\", anchor=CENTER)\n report_data.heading(\"Share_Count\", text=\"Share Count\", anchor=CENTER)\n report_data.heading(\"Earnings_Loss\", text=\"Earnings/Loss\", anchor=CENTER)\n report_data.heading(\"Yearly_Earning_Loss\", text=\"Yearly Earning/Loss\", anchor=CENTER)\n report_data.heading(\"Purchase_Date\", text=\"Purchase Date\", anchor=CENTER)\n\n for stock in stock_data:\n report_data.insert(parent='', index='end',\n values=(\n stock[0], stock[1], stock[2], stock[3], stock[4], stock[5]))\n\n report_data.grid(column=0, row=3, sticky=W, padx=5, pady=5)\n\n\nclass Application(Frame):\n\n def create_widgets(self):\n quit = Button(root, text=\"QUIT\", fg=\"red\", command=lambda: root.quit())\n analytics = Button(root, text=\"Show Graph\", command=show_graph)\n reporting = Button(root, text=\"Show Report\", command=show_bonds_data)\n\n quit.grid(column=0, row=2, sticky=W, padx=5, pady=5)\n analytics.grid(column=0, row=0, sticky=W, padx=5, pady=5)\n reporting.grid(column=0, row=1, sticky=W, padx=5, pady=5)\n\n\n# The Stocks class\nclass Stock:\n def __init__(self, stock, share_count, purchase_price, current_value,\n purchase_date):\n self.purchase_id = uuid.uuid1()\n self.stock = stock\n self.share_count = share_count\n self.purchase_price = purchase_price\n self.current_value = current_value\n self.purchase_date = purchase_date\n self.close_prices = []\n self.close_dates = []\n\n def calculate_loss_gain(self):\n return (self.current_value - self.purchase_price) * self.share_count\n\n def calculate_yearly_earnings(self):\n return (((self.current_value - self.purchase_price) / self.purchase_price) /\n (get_today_date() - get_date_from_string(self.purchase_date)).days) * 100\n\n def add_close_value(self, close, date):\n self.close_prices.append(close * self.share_count)\n self.close_dates.append(date)\n\n\nmarket_data = json.load(open('AllStocks.json', encoding='utf-8'))\n\n\ndef get_close_price_by_date(symbol, date):\n return [x for x in market_data if x['Symbol'] == symbol and datetime.datetime.strptime(x[\n 'Date'],\n '%d-%b-%y') == date]\n\n\ndef filter_stock_from_market_data(symbol):\n return [x for x in market_data if x['Symbol'] == symbol]\n\n\nclass StockMarket:\n def __init__(self, symbol, date, close_price):\n self.symbol = symbol\n self.date = date\n self.close_price = close_price\n self.close_value_prices = []\n self.close_dates = []\n\n def add_close_value(self, date, value):\n \"\"\"Add information to the class\"\"\"\n\n self.close_value_prices.append(value)\n self.close_dates.append(date)\n\n\n# The bonds class\nclass Bond(Stock):\n def __init__(self, stock, share_count, purchase_price, current_value,\n purchase_date, bond_coupon, bond_yield):\n super().__init__(stock, share_count, purchase_price, current_value,\n purchase_date)\n self.bond_coupon = bond_coupon\n self.bond_yield = bond_yield\n\n\n# Investor class\nclass Investor:\n def __init__(self, name, phone, address):\n self.investor_id = uuid.uuid1()\n self.name = name\n 
self.phone = phone\n self.address = address\n self.stocks = []\n self.bonds = []\n\n # Adds a new stock to the investor's portfolio\n def add_stock(self, stock):\n self.stocks.append(stock)\n\n # Adds a new bond to the investor's portfolio\n def add_bond(self, bond):\n self.bonds.append(bond)\n\n # set bonds\n def set_bonds(self, bonds_data):\n self.bonds = bonds_data\n\n # set bonds\n def set_stocks(self, stocks_data):\n self.stocks = stocks_data\n\n\ndef get_date_from_string(str_date):\n return datetime.datetime.strptime(str_date, date_format).date()\n\n\ndef print_line():\n print(\"{:<30}\".format('=============================='))\n\n\ndef get_today_date():\n return datetime.date.today()\n\n\ndef open_file(file_name):\n try:\n file = open(file_name, 'r', encoding='utf-8')\n except OSError:\n print('An error occurred while opening the csv file. Please make sure that the required '\n 'data files exit in your project directory.')\n sys.exit()\n\n return file\n\n\n# Reads the data from the csv file and return it in a list format\ndef get_stock_data_from_csv_file(file_name):\n data = []\n file = open_file(file_name)\n\n try:\n with file:\n reader = csv.reader(file)\n next(reader) # skips the headers\n for row in reader:\n data.append(\n Stock(row[0], float(row[1]), float(row[2]), float(row[3]), row[4]))\n except ValueError:\n print(f'Error occurred while loading the data from {file_name}. Please make sure that '\n f'your data is in the correct format')\n sys.exit()\n\n return data\n\n\n# Reads the bonds data from the provided file\ndef get_bond_data_from_csv_file(file_name):\n data = []\n file = open_file(file_name)\n try:\n with file:\n reader = csv.reader(file)\n next(reader) # skips the headers\n for row in reader:\n data.append(Bond(row[0], float(row[1]), float(row[2]), float(row[3]), row[4],\n float(row[5]), float(row[6])))\n except:\n print(f'Error occurred while loading the data from {file_name}. 
Please make sure that '\n f'your data is in the correct format')\n sys.exit()\n\n return data\n\n\n# Writes the passed content to the file\ndef write_to_file(file_name, content):\n try:\n file = open(file_name, \"a\", encoding='utf-8')\n file.write(content)\n file.close()\n except OSError:\n print('An error occurred while writing to the file')\n sys.exit()\n\n\nif __name__ == '__main__':\n # Creating the investor Bob Smith\n bob = Investor(\"Bob Smith\", \"720-000-1234\", \"123 Main Street, Denver, CO 80123\")\n\n # Adding the stocks\n bob.set_stocks(get_stock_data_from_csv_file('Data_Stocks.csv'))\n\n # Adding bonds\n bob.set_bonds(get_bond_data_from_csv_file('Data_Bonds.csv'))\n\n # Get Market Data\n stockDictionary = {}\n with open('AllStocks.json') as data_file:\n market_data = json.load(data_file)\n\n # Get all the stocks data\n stocks = [[\"PurchaseID\", \"Symbol\", \"Share #\", \"Earnings/Loss\", \"Yearly Earning/Loss\",\n \"Purchase Date\"]]\n dateline_chart = pygal.DateLine(x_label_rotation=25)\n\n for stock in bob.stocks:\n initial_date = datetime.datetime.strptime(stock.purchase_date, '%m/%d/%Y')\n stocks.append([stock.purchase_id, stock.stock, stock.share_count,\n stock.calculate_loss_gain(),\n stock.calculate_yearly_earnings(), stock.purchase_date])\n stock_data.append([stock.purchase_id, stock.stock, stock.share_count,\n stock.calculate_loss_gain(),\n stock.calculate_yearly_earnings(), stock.purchase_date])\n\n filtered_data = filter_stock_from_market_data(stock.stock)\n\n data_list = []\n\n for data in filtered_data:\n data_date = datetime.datetime.strptime(data['Date'], '%d-%b-%y')\n close_value = data['Close']\n total_value = close_value * stock.share_count\n\n if initial_date > data_date:\n total_value = 0\n\n if stock.stock not in stockDictionary:\n newStockMarket = StockMarket(stock.stock, initial_date, total_value)\n print(stock.stock + \" added\")\n stockDictionary[stock.stock] = newStockMarket\n else:\n stockDictionary[stock.stock].stockClose = total_value\n stockDictionary[stock.stock].add_close_value(data_date, total_value)\n data_point = (data_date, total_value)\n data_list.append(data_point)\n\n dateline_chart.add(stock.stock, data_list)\n\n # Get all the bonds data\n bonds = [[\"PurchaseID\", \"Symbol\", \"Purchase Price\", \"Current Price\", \"Quantity\", \"Coupon\",\n \"Yield\", \"Purchase Date\"]]\n for bond in bob.bonds:\n bonds.append([bond.purchase_id, bond.stock, bond.purchase_price, bond.current_value,\n bond.share_count,\n bond.bond_coupon, bond.bond_yield, bond.purchase_date])\n bond_data.append([bond.purchase_id, bond.stock, bond.purchase_price, bond.current_value,\n bond.share_count,\n bond.bond_coupon, bond.bond_yield, bond.purchase_date])\n # reporting\n report = '\\n\\nStocks Ownership for ' + bob.name + \":\\n\" + tabulate(stocks,\n headers='firstrow',\n tablefmt='fancy_grid') \\\n + '\\n\\nBonds Ownership for ' + bob.name + \":\\n\" + tabulate(bonds, headers='firstrow',\n tablefmt='fancy_grid')\n\n print(report) # Prints the report to the console\n\n write_to_file(bob.name + \"_investment_report.txt\", report) # Save the report to a txt file\n\n # Analytics\n dateline_chart.title = bob.name + \"'s Portfolio Evolution\"\n dateline_chart.x_title = 'Date'\n dateline_chart.y_title = 'Value'\n\n dateline_chart.render_to_file('line_chart.svg')\n\n # UI Integration\n app = Application(master=root)\n app.create_widgets()\n app.mainloop()\n 
root.destroy()\n","repo_name":"jozykab/ict_4370_portfolioApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6988888403","text":"# Stack with maximum support\n# Implement a stack supporting the push, pop and max operations.\n# Input: a sequence of push, pop and max requests.\n# Output: for each max request, print the maximum\n# number currently on the stack.\n\nclass Stack(object):\n    def __init__(self):\n        self.stack = []\n        self.max_list = []\n        self.max = max\n\n    def Push(self, item):\n        self.stack.append(item)\n        if len(self.max_list) == 0:\n            self.max_list.append(item)\n        else:\n            if item > self.max_list[len(self.stack) - 2]:\n                self.max_list.append(item)\n            else:\n                self.max_list.append(self.max_list[len(self.stack) - 2])\n        return self.stack\n\n    def Pop(self):\n        if len(self.stack) == 0:\n            return None\n        else:\n            self.stack.pop()\n            self.max_list.pop()\n            return self.stack\n\n    def Max(self):\n        return self.max_list[len(self.max_list) - 1]\n\ndef Request(stack, s):\n    if s.startswith('push'):\n        num = int(s[s.index(' ') + 1:])\n        return stack.Push(num)\n    elif s.startswith('pop'):\n        return stack.Pop()\n    elif s.startswith('max'):\n        print(stack.Max())\n\ndef main():\n    n = int(input())\n    my_stack = Stack()\n    for i in range(n):\n        Request(my_stack, input())\n\nif __name__ == '__main__':\n    main()","repo_name":"AlbinaKaybysheva/Algorithm_problems","sub_path":"Stack_with_max.py","file_name":"Stack_with_max.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34143332898","text":"from math import ceil\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable, grad\n\n\ndef weights_init(m):\n    if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:\n        nn.init.xavier_normal_(m.weight.data)\n        if m.bias is not None:\n            nn.init.constant_(m.bias, 0)\n    elif type(m) == nn.BatchNorm2d:\n        nn.init.normal_(m.weight, 1.0, 0.02)\n        nn.init.constant_(m.bias, 0)\n\n\ndef hypersphere(z, radius=1):\n    return z * radius / z.norm(p=2, dim=1, keepdim=True)\n\n\ndef exp_mov_avg(Gs, G, alpha=0.999, global_step=999):\n    alpha = min(1 - 1 / (global_step + 1), alpha)\n    for ema_param, param in zip(Gs.parameters(), G.parameters()):\n        ema_param.data.mul_(alpha).add_(1 - alpha, param.data)\n\n\nclass Progress:\n    \"\"\"Determine the progress parameter of the training given the epoch and the progression in the epoch\n    Args:\n        n_iter (int): the number of epochs before changing the progress,\n        pmax (int): the maximum progress of the training.\n        batchSizeList (list): the list of the batchSize to adopt during the training\n    \"\"\"\n\n    def __init__(self, n_iter, pmax, batchSizeList):\n        assert n_iter > 0 and isinstance(n_iter, int), 'n_iter must be int >= 1'\n        assert pmax >= 0 and isinstance(pmax, int), 'pmax must be int >= 0'\n        assert isinstance(batchSizeList, list) and \\\n               all(isinstance(x, int) for x in batchSizeList) and \\\n               all(x > 0 for x in batchSizeList) and \\\n               len(batchSizeList) == pmax + 1, \\\n            'batchSizeList must be a list of int > 0 and of length pmax+1'\n\n        self.n_iter = n_iter\n        self.pmax = pmax\n        self.p = 0\n        self.batchSizeList = batchSizeList\n\n    def progress(self, epoch, i, total):\n        \"\"\"Update the progress given the epoch and the iteration of the epoch\n        Args:\n            epoch (int): the current epoch of the training\n            i (int): iteration in the epoch\n            
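A short usage sketch for the max-stack technique in the Stack_with_max record above; the auxiliary list keeps, for each stack depth, the maximum seen so far, which makes Max() an O(1) lookup:

```python
s = Stack()  # the class defined in the record above
for request in ("push 2", "push 1", "max", "pop", "max"):
    Request(s, request)
# prints 2 (max of [2, 1]) and then 2 (max of [2])
```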
total (int): total number of iterations in the epoch\n \"\"\"\n x = (epoch + i / total) / self.n_iter\n self.p = min(max(int(x / 2), x - ceil(x / 2), 0), self.pmax)\n return self.p\n\n def resize(self, images):\n \"\"\"Resize the images w.r.t the current value of the progress.\n Args:\n images (Variable or Tensor): batch of images to resize\n \"\"\"\n x = int(ceil(self.p))\n if x >= self.pmax:\n return images\n else:\n return F.adaptive_avg_pool2d(images, 4 * 2 ** x)\n\n @property\n def batchSize(self):\n \"\"\"Returns the current batchSize w.r.t the current value of the progress\"\"\"\n x = int(ceil(self.p))\n return self.batchSizeList[x]\n\n\nclass GradientPenalty:\n \"\"\"Computes the gradient penalty as defined in \"Improved Training of Wasserstein GANs\"\n (https://arxiv.org/abs/1704.00028)\n Args:\n batchSize (int): batch-size used in the training. Must be updated w.r.t the current batchsize\n lambdaGP (float): coefficient of the gradient penalty as defined in the article\n gamma (float): regularization term of the gradient penalty, augment to minimize \"ghosts\"\n \"\"\"\n\n def __init__(self, batchSize, lambdaGP, gamma=1, device='cpu'):\n self.batchSize = batchSize\n self.lambdaGP = lambdaGP\n self.gamma = gamma\n self.device = device\n\n def __call__(self, netD, real_data, fake_data, progress):\n alpha = torch.rand(self.batchSize, 1, 1, 1, requires_grad=True, device=self.device)\n # randomly mix real and fake data\n interpolates = real_data + alpha * (fake_data - real_data)\n # compute output of D for interpolated input\n disc_interpolates = netD(interpolates, progress)\n # compute gradients w.r.t the interpolated outputs\n gradients = grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size(), device=self.device),\n create_graph=True, retain_graph=True, only_inputs=True)[0].view(self.batchSize, -1)\n gradient_penalty = (((gradients.norm(2, dim=1) - self.gamma) / self.gamma) ** 2).mean() * self.lambdaGP\n\n return gradient_penalty\n","repo_name":"jeromerony/Progressive_Growing_of_GANs-PyTorch","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"81"} +{"seq_id":"10014035504","text":"import pandas as pd\n\nclass Parser:\n\n\tdef parse_donau(self, init_names=['Peak_ID'], parse_sample_ids=True):\n\t\tprint('Parsing Danube Samples...')\n\t\tdonau = pd.read_csv('../Data/donau.csv')\n\t\tprint('Parsing completed')\n\t\tnames = init_names\n\t\tif parse_sample_ids:\n\t\t\tsample_ids = []\n\t\tfor name in donau.keys():\n\t\t\tif 'Intensity' in name and 'DN' in name:\n\t\t\t\tnames.append(name)\n\t\t\t\tif parse_sample_ids:\n\t\t\t\t\tpos = name.find('DN')\n\t\t\t\t\tsample_ids.append(name[pos:pos+4])\n\n\t\tdonau = donau.loc[:, names]\n\t\tif parse_sample_ids:\n\t\t\treturn donau, sample_ids\n\t\telse:\n\t\t\treturn donau\n\n\tdef parse_meta(self, sample_ids, id_prefix):\n\t\tcols = ['Code', 'Name', 'type of sample', 'Pollution source category']\n\t\tmeta = pd.read_csv('../Data/meta.csv', usecols=cols)\n\t\tmeta = meta[meta['Code'].str.find(id_prefix)!=-1]\n\t\tmeta = meta[meta.Code.notnull()]\n\t\tmeta = meta.drop_duplicates(subset='Code')\n\t\tmeta.index = meta['Code']\n\t\tfor pol in ['WWTP', 'UPS', 'DS']:\n\t\t\tmeta.loc[meta['Name'].str.find(pol)!=-1, 'Pollution source category'] = pol\n\t\tmeta.loc[meta['type of sample'].str.find('WWTP')!=-1, 'Pollution source category'] = 'WWTP'\n\n\t\tlabels = []\n\t\tfor 
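A hedged usage sketch for the `GradientPenalty` helper in the GAN-utils record above; `netD`, `real_batch`, `fake_batch` and `progress` are placeholders for the caller's critic, data batches and progress value, not names from the record:

```python
# Hypothetical WGAN-GP critic step built around the GradientPenalty class above.
gp_fn = GradientPenalty(batchSize=16, lambdaGP=10.0, gamma=1.0, device='cuda')

d_real = netD(real_batch, progress).mean()
d_fake = netD(fake_batch.detach(), progress).mean()
gp = gp_fn(netD, real_batch, fake_batch.detach(), progress)

d_loss = d_fake - d_real + gp  # WGAN-GP critic objective
d_loss.backward()
```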
sample_id in sample_ids:\n\t\t\ttry:\n\t\t\t\tlabels.append(meta.at[sample_id,'Pollution source category'])\n\t\t\texcept KeyError:\n\t\t\t\tlabels.append('blank')\n\t\t\t\n\t\treturn meta, labels\n","repo_name":"pstahlhofen/ufzintern","sub_path":"Preliminary_Work/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23098295115","text":"import argparse, torch, torchaudio, faiss, pickle, os\nfrom torchaudio.sox_effects import apply_effects_tensor\nfrom torchaudio.transforms import MelSpectrogram\nfrom tqdm import tqdm\n\nimport numpy as np\n\n# Load the model\nwav2mel = torch.jit.load(\"wav2mel.pt\")\ndvector = torch.jit.load(\"dvector.pt\").eval()\nid_to_name={}\n\ndef audio_procession(file_path):\n # Load the waveform\n wav_tensor, sample_rate = torchaudio.load(file_path)\n mel_tensor = wav2mel(wav_tensor, sample_rate) # shape: (frames, mel_dim)\n\n # Compute embedding\n emb_tensor = dvector.embed_utterance(mel_tensor) # shape: (emb_dim)\n #emb_tensor = dvector.embed_utterances([mel_tensor_1, mel_tensor_2]) # shape: (emb_dim)\n #print(emb_tensor.shape)\n return emb_tensor\n\ndef multi_segment_audio_processing(directory_path, speaker_id):\n mel_tensors = [] # List to store mel tensors for all segments\n\n # Iterate over all audio files in the directory\n for file_name in os.listdir(directory_path):\n if file_name.endswith('.wav') and file_name.startswith('20170001P00' + speaker_id):\n file_path = os.path.join(directory_path, file_name)\n\n # Process the audio and obtain the mel tensor\n wav_tensor, sample_rate = torchaudio.load(file_path)\n mel_tensor = wav2mel(wav_tensor, sample_rate)\n mel_tensors.append(mel_tensor)\n else:\n file_path = os.path.join(directory_path, file_name)\n # Process the audio and obtain the mel tensor\n wav_tensor, sample_rate = torchaudio.load(file_path)\n mel_tensor = wav2mel(wav_tensor, sample_rate)\n mel_tensors.append(mel_tensor)\n # Use dvector.embed_utterances to convert mel tensors of multiple segments\n emb_tensor = dvector.embed_utterances(mel_tensors) # shape: (emb_dim)\n\n return emb_tensor\n\ndef register_file(file_path):\n index_file = \"emb_index.index\"\n if os.path.exists(index_file):\n # Load the existing index\n index = faiss.read_index(index_file)\n else:\n # Create a new index\n index = faiss.IndexFlatL2(256) # Assuming the dimension of your embeddings is 256\n\n # Load the existing names list\n if os.path.exists('names.pkl'):\n with open('names.pkl', 'rb') as f:\n names = pickle.load(f)\n else:\n names = [] # Create a new list to store the names\n\n file_name = os.path.basename(file_path)\n speaker_id = file_name # Extract the four-digit ID from the file name\n\n if speaker_id not in names: # Check if speaker ID is already registered\n # Process the audio file\n emb_tensor = audio_procession(file_path)\n emb_numpy = emb_tensor.detach().numpy().reshape(1, -1)\n\n # Add the new vector to the index\n index.add(emb_numpy)\n # Add the name to the list\n names.append(speaker_id)\n\n # Save the names list to a file\n with open('names.pkl', 'wb') as f:\n pickle.dump(names, f)\n\n # Write the index back to the file\n faiss.write_index(index, index_file)\n print(\"File registered successfully.\")\n else:\n print(\"File already registered.\")\n\ndef registration(directory_path):\n index_file = \"emb_index.index\"\n if os.path.exists(index_file):\n # Load the existing index\n index = faiss.read_index(index_file)\n else:\n # 
Create a new index\n index = faiss.IndexFlatL2(256) # Assuming the dimension of your embeddings is 256\n\n # Load the existing names list\n if os.path.exists('names.pkl'):\n with open('names.pkl', 'rb') as f:\n names = pickle.load(f)\n else:\n names = [] # Create a new list to store the names\n\n # Get a list of all speaker IDs\n speaker_ids = set()\n file_list = [file_name for file_name in os.listdir(directory_path) if file_name.endswith('.wav')]\n for file_name in file_list:\n if file_name.startswith('20170001P00'):\n speaker_id = file_name[-12:-8] # Extract the four-digit ID from the file name\n else:\n speaker_id=file_name[:-4]\n speaker_ids.add(speaker_id)\n \n # Remove already registered speaker IDs\n speaker_ids = speaker_ids.difference(names)\n\n # Loop over remaining speaker IDs with a progress bar\n progress_bar = tqdm(speaker_ids, desc=\"Processing speakers\", unit=\"speaker\")\n for speaker_id in progress_bar:\n # Process multiple segments of the same speaker\n emb_tensor = multi_segment_audio_processing(directory_path, speaker_id)\n emb_numpy = emb_tensor.detach().numpy().reshape(1, -1)\n\n # Add the new vector to the index\n index.add(emb_numpy)\n # Add the name to the list\n names.append(speaker_id)\n\n # Save the names list to a file\n with open('names.pkl', 'wb') as f:\n pickle.dump(names, f)\n\n # Write the index back to the file\n faiss.write_index(index, index_file)\n\n\ndef recognition_img(emb_tensor):\n # Load the names list from the file\n with open('img_names.pkl', 'rb') as f:\n names = pickle.load(f)\n\n emb_numpy = emb_tensor.detach().numpy().reshape(1, -1)\n index_file = \"img_index.index\"\n index = faiss.read_index(index_file)\n k = 1\n radius=9\n lims, D, I = index.range_search(emb_numpy, radius)\n if len(D) > 0:\n # Find the closest result\n closest_index = np.argmin(D)\n # Find the ID of the closest result\n closest_id = I[closest_index]\n # Look up the name for the closest result\n closest_name = names[closest_id]\n print(closest_name)\n \ndef recognition(file_path):\n # Load the names list from the file\n with open('names.pkl', 'rb') as f:\n names = pickle.load(f)\n \n emb_tensor = audio_procession(file_path)\n recognition_img(emb_tensor) \n emb_numpy = emb_tensor.detach().numpy().reshape(1, -1)\n index_file = \"emb_index.index\"\n index = faiss.read_index(index_file)\n k = 10\n D, I = index.search(emb_numpy, k)\n print(\"D\",D)\n # Look up the names for the returned IDs\n returned_names = [names[i] for i in I[0]]\n\n print(returned_names)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Demo script\")\n parser.add_argument(\"--file\", type=str, help=\"Input file path\")\n parser.add_argument(\"--mode\", type=str, help=\"Mode: register or recognize\")\n args = parser.parse_args()\n \n if args.mode == \"register\":\n register_file(args.file)\n elif args.mode == \"recognize\":\n recognition(args.file)\n","repo_name":"hackermengzhi/solid-octo-spork","sub_path":"合并去重识别.py","file_name":"合并去重识别.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39388964626","text":"from country import Country\n\ndef read_csv_with_error_handling(date) -> tuple[int, int, Country]:\n \"\"\"\n Read a CSV file and yield each row as a tuple of (sng_id, user_id, country).\n If a row is invalid, print an error message and skip the row.\n\n --- Example ---\n Input:\n 1|1|DE\n 2|2|GB\n 3|3|NL\n\n Output:\n (1, 1, )\n (2, 2, )\n (3, 3, )\n\n --- Input 
---\n\n date: str\n The date of the file to read, in the format YYYY-MM-DD.\n\n --- Output ---\n\n tuple[int, int, Country]\n A tuple of (sng_id, user_id, country) for each row in the file.\n \"\"\"\n\n file_path = f\"..\\data\\sample_listen-{date}_2Mlines.log\"\n\n with open(file_path, \"r\") as file:\n lines = file.readlines()\n for row in lines:\n try:\n record = row.strip().split('|')\n if len(record) == 3:\n if record[2] not in Country.__members__:\n raise Exception(f\"Invalid country: {record[2]}\")\n else:\n country = Country[record[2]]\n sng_id = int(record[0])\n user_id = int(record[1])\n yield sng_id, user_id, country\n else:\n raise Exception(f\"Invalid record: {record}\")\n except Exception as e:\n pass\n # print(f\"Error: {e} - Skipping row.\")\n\n\nif __name__ == \"__main__\":\n date = '2021-12-02'\n for row in read_csv_with_error_handling(date):\n sng_id, user_id, country = row\n\n\n","repo_name":"refuna/Top50Songs_internTest","sub_path":"scripts/datastream.py","file_name":"datastream.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30000364286","text":"# Graph generator with python\n\nimport random\nimport time\n\n\nclass GraphGenerator():\n\n def __init__(self, n):\n self.n = n\n self.graph = [[] for i in range(n)]\n self.score = {}\n\n def generate_graph(self):\n\n for i in range(self.n):\n # include an edge to all nodes\n for j in range(self.n):\n if i != j:\n if ((i, j) and (j, i)) not in self.score:\n score = random.randint(1, 100)\n self.score[(i, j)] = score\n self.score[(j, i)] = score\n\n self.graph[i].append(j)\n\n def show_graph(self):\n for i in range(self.n):\n print('Nodes connecteds to %d: ' % i, end=' ')\n for node in self.graph[i]:\n print('[%d to %d the score is %d] ' % (i, node, self.score[i, node]), end='\\t ')\n print()\n","repo_name":"pdonatilio/python_metaheuristics","sub_path":"tsp_00_graph_generator.py","file_name":"tsp_00_graph_generator.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18187988040","text":"from PyQt5.QtCore import QSettings, QTranslator, qVersion, QCoreApplication\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QAction, QFileDialog, QDialog, QProgressBar\nfrom PyQt5.QtWidgets import *\nfrom qgis.utils import iface\nfrom qgis.core import QgsTask, QgsApplication\nfrom osgeo import gdal, osr\nimport io\nimport numpy as np \nfrom math import *\n# Initialize Qt resources from file resources.py\nfrom .resources import *\n# Import the code for the dialog\nfrom .BHCgeoQGIS_dialog import BHCgeo_QGISDialog\nimport os.path\n\n\nclass BHCgeo_QGIS:\n \"\"\"QGIS Plugin Implementation.\"\"\"\n\n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n \"\"\"\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'bhcgeoqgis_{}.qm'.format(locale))\n\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n\n if qVersion() > '4.3.3':\n 
QCoreApplication.installTranslator(self.translator)\n\n # Declare instance attributes\n self.actions = []\n self.menu = self.tr(u'&BHCgeo')\n\n # Check if plugin was started the first time in current QGIS session\n # Must be set in initGui() to survive plugin reloads\n self.first_start = None\n\n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('BHCgeo_QGIS', message)\n\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n \"\"\"Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. 
Note that the action is also\n added to self.actions list.\n :rtype: QAction\n \"\"\"\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n # Adds plugin icon to Plugins toolbar\n self.iface.addToolBarIcon(action)\n\n if add_to_menu:\n self.iface.addPluginToRasterMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action\n\n def initGui(self):\n \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"\n\n icon_path = ':/plugins/BHCgeoQGIS/figBHC.png'\n self.add_action(\n icon_path,\n text=self.tr(u'BHCgeo'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True\n\n\n def unload(self):\n \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n for action in self.actions:\n self.iface.removePluginRasterMenu(\n self.tr(u'&BHCgeo'),\n action)\n self.iface.removeToolBarIcon(action)\n\n def select_output_file(self): \n filename = QFileDialog.getExistingDirectory(BHCgeo_QGIS.dlg, (\"Choose the output folder\"))\n BHCgeo_QGIS.dlg.lineEdit.setText(filename)\n\n def run(self):\n \"\"\"Run method that performs all the real work\"\"\"\n\n # Create the dialog with elements (after translation) and keep reference\n # Only create GUI ONCE in callback, so that it will only load when the plugin is started\n if self.first_start == True:\n self.first_start = False\n BHCgeo_QGIS.dlg = BHCgeo_QGISDialog()\n BHCgeo_QGIS.dlg.pushButton.clicked.connect(self.select_output_file)\n \n BHCgeo_QGIS.dlg.comboBox.clear() \n \n # meses_list = [\"January\",\"February\",\"March\",\"April\",\n # \"May\",\"June\",\"July\",\"August\",\"September\",\n # \"October\",\"November\",\"December\"] \n meses_list = [QCoreApplication.translate('self.dlg.comboBox', \"January\"),\n QCoreApplication.translate('self.dlg.comboBox', \"February\"),\n QCoreApplication.translate('self.dlg.comboBox', \"March\"),\n QCoreApplication.translate('self.dlg.comboBox', \"April\"),\n QCoreApplication.translate('self.dlg.comboBox', \"May\"),\n QCoreApplication.translate('self.dlg.comboBox', \"June\"),\n QCoreApplication.translate('self.dlg.comboBox', \"July\"),\n QCoreApplication.translate('self.dlg.comboBox', \"August\"),\n QCoreApplication.translate('self.dlg.comboBox', \"September\"),\n QCoreApplication.translate('self.dlg.comboBox', \"October\"),\n QCoreApplication.translate('self.dlg.comboBox', \"November\"),\n QCoreApplication.translate('self.dlg.comboBox', \"December\")] \n\n BHCgeo_QGIS.dlg.comboBox.addItems(meses_list)\n \n # show the dialog\n BHCgeo_QGIS.dlg.show()\n # Run the dialog event loop\n result = BHCgeo_QGIS.dlg.exec_()\n # See if OK was pressed\n if result:\n BHCgeo_QGIS.progress_bar = ProgessBar()\n BHCgeo_QGIS.progress_bar.show()\n \n\n\nclass HeavyTask(QgsTask):\n \"\"\"Here we subclass QgsTask\"\"\"\n def __init__(self, desc):\n QgsTask.__init__(self, desc)\n\n\n def array2raster(self,rasterfn,newRasterfn,array):\n raster = gdal.Open(rasterfn) #raster modelo\n geotransform = raster.GetGeoTransform()\n originX = geotransform[0] \n originY = geotransform[3]\n pixelWidth = self.instantiatePixelWidth\n pixelHeight = self.instantiatePixelHeight\n cols = raster.RasterXSize\n rows = raster.RasterYSize\n\n driver = gdal.GetDriverByName('GTiff')\n outRaster = driver.Create(newRasterfn, cols, rows, 1, 
gdal.GDT_Float32)\n outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(array)\n outband.SetNoDataValue(self.NoData) #nao insere valor de NoData, mas sim, escolhe um valor dentre os existentes\n outRasterSRS = osr.SpatialReference()\n outRasterSRS.ImportFromWkt(raster.GetProjectionRef())\n outRaster.SetProjection(outRasterSRS.ExportToWkt())\n outband.FlushCache()\n return newRasterfn\n\n def run(self):\n \"\"\"This function is where you do the 'heavy lifting' or implement\n the task which you want to run in a background thread. This function \n must return True or False and should only interact with the main thread\n via signals\"\"\"\n \n percent = 0\n self.setProgress(percent)\n\n BHCgeo_QGIS.pastaSelecionada = BHCgeo_QGIS.dlg.lineEdit.text()\n self.diretorio = BHCgeo_QGIS.pastaSelecionada+\"\\\\\"\n \n listaMesDesordenada = ['Jan','Fev','Mar','Abr','Mai','Jun','Jul','Ago','Set','Out','Nov','Dez'] \n self.nomeMes = []\n BHCgeo_QGIS.mesEscolhidoIndex = BHCgeo_QGIS.dlg.comboBox.currentIndex() # <-----\n\n cont = BHCgeo_QGIS.mesEscolhidoIndex\n for mes in range(len(listaMesDesordenada) - BHCgeo_QGIS.mesEscolhidoIndex):\n self.nomeMes.append(listaMesDesordenada[cont])\n cont += 1\n cont = 0\n for mes in range(BHCgeo_QGIS.mesEscolhidoIndex):\n self.nomeMes.append(listaMesDesordenada[cont]) \n cont += 1\n #-----------------------------------------------------------\n\n self.NoData = -9999\n CAD_raster = gdal.Open(self.diretorio+\"cad.tif\") \n bandaUnicaCAD = CAD_raster.GetRasterBand(1)\n bandaUnicaCAD.SetNoDataValue(self.NoData)\n CAD_array = np.array(bandaUnicaCAD.ReadAsArray())\n CAD_list = []\n CAD_list.append(CAD_array.tolist())\n ETP_list = [[] for mes in self.nomeMes]\n P_list = [[] for mes in self.nomeMes]\n\n contMes = 0\n for mes in self.nomeMes:\n ETP_raster = gdal.Open(self.diretorio+\"etp\"+mes+\".tif\") \n bandaUnicaETP = ETP_raster.GetRasterBand(1)\n bandaUnicaETP.SetNoDataValue(self.NoData)\n ETP_array = np.array(bandaUnicaETP.ReadAsArray()) \n ETP_listMes = ETP_array.tolist()\n ETP_list[contMes].append(ETP_listMes)\n P_raster = gdal.Open(self.diretorio+\"p\"+mes+\".tif\") \n bandaUnicaP = P_raster.GetRasterBand(1)\n bandaUnicaP.SetNoDataValue(self.NoData)\n P_array = np.array(bandaUnicaP.ReadAsArray()) \n P_listMes = P_array.tolist()\n P_list[contMes].append(P_listMes)\n contMes += 1\n \n percent = 10\n self.setProgress(percent)\n amount = len(self.nomeMes)\n # ---------------- Verificando as condicoes para fazer os calculos ---------------------------\n\n for mes in self.nomeMes:\n #--- calculate a aprox size to put in the progress bar \n bit = (20-percent) / amount # max until this point - last point / len\n percent += bit # beggins the point at 10% \n self.setProgress(percent)\n #-------------------------------------------------------------\n CAD_raster = gdal.Open(self.diretorio+\"cad.tif\")\n ETP_raster = gdal.Open(self.diretorio+\"etp\"+mes+\".tif\") \n P_raster = gdal.Open(self.diretorio+\"p\"+mes+\".tif\")\n assert CAD_raster.RasterXSize == ETP_raster.RasterXSize == P_raster.RasterXSize \n assert CAD_raster.RasterYSize == ETP_raster.RasterYSize == P_raster.RasterYSize \n\n #-------------------- Substituindo valores de CAD = 0 por NoData ------------------------------\n\n for matriz in range(len(CAD_list)):\n for row in range(len(CAD_list[matriz])):\n for i in range(len(CAD_list[matriz][row])):\n if CAD_list[matriz][row][i] == 0:\n CAD_list[matriz][row].pop(i) # retira o cad 0 e 
substitui por NODATA\n CAD_list[matriz][row].insert(i,self.NoData) \n\n #-------------------- Retirando as linhas que vem do formato array ------------------------------\n\n CADFloatAll = []\n ETPFloatAll = [[] for mes in self.nomeMes]\n PFloatAll = [[] for mes in self.nomeMes]\n\n for matriz in range(len(CAD_list)):\n for row_cont in range(len(CAD_list[matriz])):\n for item in range(len(CAD_list[matriz][row_cont])):\n CADFloatAll.append(CAD_list[matriz][row_cont][item])\n\n for mes in range(len(self.nomeMes)):\n for matriz in range(len(ETP_list[mes])): \n for row_cont in range(len(ETP_list[mes][matriz])):\n for item in range(len(ETP_list[mes][matriz][row_cont])):\n ETPFloatAll[mes].append(ETP_list[mes][matriz][row_cont][item])\n PFloatAll[mes].append(P_list[mes][matriz][row_cont][item])\n \n percent = 20\n self.setProgress(percent)\n #--------------------------- Fazendo o calculo ---------------------------------------------\n\n ARM = [[] for i in range(len(self.nomeMes))]\n ETR = [[] for i in range(len(self.nomeMes))]\n B = [[] for i in range(len(self.nomeMes))]\n\n for mes in range(amount):\n #--- calculate a aprox size to put in the progress bar \n bit = (50-percent) / amount # max until this point - last point / len\n percent += bit # beggins the point at 10% \n self.setProgress(percent)\n #-------------------------------------------------------------\n if mes == 0: # primeiro mes\n for cell in range(len(CADFloatAll)):\n if PFloatAll[mes][cell] == self.NoData or ETPFloatAll[mes][cell] == self.NoData:\n ARM[mes].append(self.NoData) #nao faz o calculo para NoData\n else:\n ARM[mes].append(CADFloatAll[cell])\n\n cont_i = 0\n for i in PFloatAll[mes]:\n if CADFloatAll[cont_i] == self.NoData or PFloatAll[mes][cont_i] == self.NoData or ETPFloatAll[mes][cont_i] == self.NoData:\n B[mes].append(self.NoData) #nao faz o calculo para self.NoData\n ETR[mes].append(self.NoData)\n cont_i += 1\n \n elif PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i] > 0: #excesso\n if ARM[mes][cont_i] + (PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i]) >= CADFloatAll[cont_i]:\n B[mes].append(PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i]) #sempre para o mes zero B = Pi-ETPi\n ETR[mes].append(ETPFloatAll[mes][cont_i])\n cont_i += 1\n else:\n assert ARM[mes][cont_i] + (PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i]) >= CADFloatAll[cont_i]#, print(\"\\n\"+\n #\" ---> COMECOU COM MES ERRADO! O arm anterior deve ser igual a CAD quando roda o modelo a primeira vez! <--- \")\n break\n \n else: # defict ---> teoricamente nao deveria ter essa possibilidade no primeiro mes,pois deve ser escolhido um mes com P>ETP para iniciar\n form = ARM[mes][cont_i] * exp((PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i]) / CADFloatAll[cont_i])\n if form > 0: \n # Neste primeiro mes, form = ARM do mes em questao (mes zero) e ARM[mes] == ARM do mes anterior\n B[mes].append((PFloatAll[mes][cont_i] + (ARM[mes][cont_i] - form)) - ETPFloatAll[mes][cont_i]) \n ARM_mes_anterior = ARM[mes][cont_i] #guarda o valor de arm que vai ser atualizado\n ARM[mes].pop(cont_i)\n ARM[mes].insert(cont_i,form)\n ETR[mes].append(PFloatAll[mes][cont_i] + (ARM_mes_anterior - form)) #teoricamente nao deveria ter ETR no primeiro mes\n cont_i += 1 #pois deve ser escolhido um mes com P>ETP para iniciar\n else:\n assert form > 0#, print(\" ---> ERRO MATEMATICO! Nao pode acontecer tal resultado. 
<--- \")\n break\n \n else: # outros meses\n cont_i = 0\n for i in PFloatAll[mes]:\n if CADFloatAll[cont_i] == self.NoData or PFloatAll[mes][cont_i] == self.NoData or ETPFloatAll[mes][cont_i] == self.NoData:\n B[mes].append(self.NoData) #nao faz o calculo para NoData\n ARM[mes].append(self.NoData) #nao faz o calculo para NoData\n ETR[mes].append(self.NoData)\n cont_i += 1\n \n elif PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i] > 0: #excesso\n if ARM[mes-1][cont_i] + (PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i]) >= CADFloatAll[cont_i]:\n ARM[mes].append(CADFloatAll[cont_i])\n ETR[mes].append(ETPFloatAll[mes][cont_i])\n B[mes].append(ARM[mes-1][cont_i] + (PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i]) - CADFloatAll[cont_i])\n cont_i += 1\n else:\n ARM[mes].append(ARM[mes-1][cont_i] + (PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i]))\n ETR[mes].append(ETPFloatAll[mes][cont_i])\n B[mes].append(0)\n cont_i += 1\n \n else: # defict\n form = ARM[mes-1][cont_i] * exp((PFloatAll[mes][cont_i] - ETPFloatAll[mes][cont_i]) / CADFloatAll[cont_i])\n if form > 0:\n ARM[mes].append(form)\n ETR[mes].append(PFloatAll[mes][cont_i] + (ARM[mes-1][cont_i]-ARM[mes][cont_i]))\n B[mes].append((PFloatAll[mes][cont_i] + (ARM[mes-1][cont_i]-ARM[mes][cont_i])) - ETPFloatAll[mes][cont_i])\n cont_i += 1\n else:\n assert form > 0#, print(\" ---> ERRO MATEMATICO! Nao pode acontecer tal resultado. <--- \")\n break\n percent = 50\n self.setProgress(percent)\n if BHCgeo_QGIS.dlg.checkBox_PR.isChecked(): # <-----\n\n # ---------------------------- PROVA REAL -------------------------------------\n\n listaRelatorio = [] \n texto = QCoreApplication.translate('report', '''The Verification Proof checks if the following conditions were respected, in each pixel:\n \n Sum(ETP) = Sum(ETR)+Sum(DEF)\n Sum(P) = Sum(ETR)+Sum(EXC)\n Sum(Alt) = 0\n\n Where DEF(Water Deficit) = B negative, EXC(Water Excess) = B positive and Alt\nis the alteration sufered by ARM, from one month to the next.\n\n If these conditions are not met, the report will point out the first pixel\nwhere the error occurred, with a tolerance of 0.9 mm. Therefore, it is very likely\nthat there are other pixels with the same error. 
This means that another month \nshould be chosen to be the first in the Climatic Water Balance (BHC) calculations.\n\n The month prior to the one chosen to start the BHC should have its ground water \nstorage totally filled, this means that in the previous month, ARM must be equal to \nCAD, that is, the month prior to the first (and preferably the first month as well)\nshould not be a month of water deficit.\n\n If you do not have an idea when to start the BHC, you should run several tests \n(Verification Proof) to identify when to start and thus produce the most reliable \noutputs.\n\n If it is a very large and/or very heterogeneous area, climatologically, and all\nVerification Proofs found out errors, the inputs are suggested to be fragmented in \nsmaller areas to better represent their climatological characteristics.\n\n\n ******************************* REPORT ******************************\n ''')\n\n listaRelatorio.append(texto)\n\n somatorioP = [ [] for cell in CADFloatAll ] #somatorio por pixel\n somatorioETP = [ [] for cell in CADFloatAll ]\n somatorioETR = [ [] for cell in CADFloatAll ]\n somatorioDEF = [ [] for cell in CADFloatAll ]\n somatorioEXC = [ [] for cell in CADFloatAll ]\n somatorioAlt = [ [] for cell in CADFloatAll ]\n\n amount_cell = len(CADFloatAll)\n for cell in range(amount_cell):\n #--- calculate a aprox size to put in the progress bar \n bit = (60-percent) / amount_cell # max until this point - last point / len\n percent += bit # beggins the point at 10% \n self.setProgress(percent)\n #-------------------------------------------------------------\n for mes in range(len(self.nomeMes)):\n if ETR[mes][cell] == self.NoData: # B[mes][cell] == NoData or PFloatAll[mes][cell] == NoData or ETPFloatAll[mes][cell] == NoData:\n somatorioP[cell].append(self.NoData)\n somatorioETP[cell].append(self.NoData)\n somatorioETR[cell].append(self.NoData)\n somatorioEXC[cell].append(self.NoData)\n somatorioDEF[cell].append(self.NoData)\n somatorioAlt[cell].append(self.NoData)\n else:\n somatorioP[cell].append(PFloatAll[mes][cell])\n somatorioETP[cell].append(ETPFloatAll[mes][cell])\n somatorioETR[cell].append(ETR[mes][cell])\n somatorioAlt[cell].append(ARM[mes][cell]-ARM[mes-1][cell])\n\n if B[mes][cell] > 0:\n somatorioEXC[cell].append(B[mes][cell])\n else:\n somatorioDEF[cell].append(B[mes][cell]) \n\n self.setProgress(60)\n\n for cell in range(len(CADFloatAll)):\n #--- calculate a aprox size to put in the progress bar \n bit = (70-percent) / amount_cell # max until this point - last point / len\n percent += bit # beggins the point at 10% \n self.setProgress(percent)\n #-------------------------------------------------------------\n if self.NoData in somatorioETR[cell]: # pois ETR eh saida com NoData nos lugares certos\n pass\n else:\n arredondandoSomETP = sum(somatorioETP[cell])\n arredondandoSomP = sum(somatorioP[cell])\n arredondandoSomETR = sum(somatorioETR[cell])\n arredondandoSomDEF = abs(sum(somatorioDEF[cell]))\n arredondandoSomEXC = sum(somatorioEXC[cell])\n arredondandoSomAlt = sum(somatorioAlt[cell])\n\n if arredondandoSomETP == arredondandoSomETR + arredondandoSomDEF:\n erro = \"SEM ERRO\"\n elif abs(arredondandoSomETP - (arredondandoSomETR + arredondandoSomDEF)) < 1: #limite aceitavel, em mm, para fins de arredondamento\n erro = \"SEM ERRO\"\n else: \n mensagemRelatorio = (\"\\nPixel: \"+str(cell)+\"\\n\"+\n QCoreApplication.translate(\"mensagemRelatorio\",\"Sum(ETP): \")+\n str(arredondandoSomETP)+\"\\n\"+\n 
QCoreApplication.translate(\"mensagemRelatorio\",\"Sum(ETR)+Sum(DEF): \")+\n str(arredondandoSomETR+arredondandoSomDEF)+\"\\n\\n\"+\n QCoreApplication.translate(\"mensagemRelatorio\", \n \"In this pixel, the Verification Proof found a possible error. Choose another month to start with.\"))\n listaRelatorio.append(mensagemRelatorio+\"\\n\")\n erro = \"ERRO\"\n break\n if arredondandoSomP == arredondandoSomETR + arredondandoSomEXC:\n erro = \"SEM ERRO\"\n elif abs(arredondandoSomP - (arredondandoSomETR + arredondandoSomEXC)) < 1: #limite aceitavel, em mm, para fins de arredondamento\n erro = \"SEM ERRO\"\n else: \n mensagemRelatorio = (\"\\nPixel: \"+str(cell)+\"\\n\"+\n QCoreApplication.translate(\"mensagemRelatorio\",\"Sum(P): \")+\n str(arredondandoSomP)+\"\\n\"+\n QCoreApplication.translate(\"mensagemRelatorio\",\"Sum(ETR)+Sum(EXC): \")+\n str(arredondandoSomETR+arredondandoSomEXC)+\"\\n\\n\"+\n QCoreApplication.translate(\"mensagemRelatorio\", \n \"In this pixel, the Verification Proof found a possible error. Choose another month to start with.\"))\n listaRelatorio.append(mensagemRelatorio+\"\\n\")\n erro = \"ERRO\"\n break\n if arredondandoSomAlt == 0:\n erro = \"SEM ERRO\"\n elif arredondandoSomAlt < 1:\n erro = \"SEM ERRO\"\n else:\n mensagemRelatorio = (\"\\nPixel: \"+str(cell)+\"\\n\"+\n QCoreApplication.translate(\"mensagemRelatorio\",\"Sum(Alt): \")+\n str(arredondandoSomAlt)+\"\\n\\n\"+\n QCoreApplication.translate(\"mensagemRelatorio\",\n \"In this pixel, the Verification Proof found a possible error. Choose another month to start with\"))\n listaRelatorio.append(mensagemRelatorio+\"\\n\")\n erro = \"ERRO\"\n break \n \n if erro == \"ERRO\":\n mensagemRelatorio = QCoreApplication.translate(\"mensagemRelatorio\",\n'''\\n--> The Verification Proof found out, in at least one pixel, the existence of a possible error.\n\n ***** CONSIDER GETTING STARTED WITH ANOTHER MONTH *****''')\n listaRelatorio.append(mensagemRelatorio+\"\\n\")\n elif erro == \"SEM ERRO\":\n mensagemRelatorio = QCoreApplication.translate(\"mensagemRelatorio\",\n'''\\n--> The Verification Proof found out that the conditions of equality, according to the formulas,\nwere maintained in all pixels.''')\n listaRelatorio.append(mensagemRelatorio+\"\\n\")\n else:\n mensagemRelatorio = str(erro) # nunca deve acontecer\n listaRelatorio.append(mensagemRelatorio+\"\\n\")\n \n abrirRelatorio = io.open(self.diretorio+QCoreApplication.translate(\"mensagemRelatorio\",\n \"Report.txt\"), mode=\"w\", encoding=\"utf-8\")\n for i in listaRelatorio:\n abrirRelatorio.write(unicode(i))\n abrirRelatorio.close()\n \n # -------------------- Criando os Rasters Finais -------------------------------\n percent = 70\n self.setProgress(percent)\n B_array = [[[] for rows in CAD_array] for mes in self.nomeMes] # cria os espacos para os rows que tem nos arquivos de entrada, para virar array\n ARM_array = [[[] for rows in CAD_array] for mes in self.nomeMes]\n ETR_array = [[[] for rows in CAD_array] for mes in self.nomeMes]\n\n for mes in range(len(self.nomeMes)):\n item_cont = 0\n for row in range(len(CAD_array)):\n for item in range(len(CAD_array[row])):\n B_array[mes][row].append(B[mes][item_cont])\n ARM_array[mes][row].append(ARM[mes][item_cont])\n ETR_array[mes][row].append(ETR[mes][item_cont])\n item_cont += 1 \n\n dataset = gdal.Open(self.diretorio+'cad.tif') #raster modelo de tamanho pixel\n geotransform = dataset.GetGeoTransform()\n if geotransform:\n self.instantiatePixelWidth = geotransform[1]\n self.instantiatePixelHeight = 
geotransform[5]\n\n rasterModelo = self.diretorio+'cad.tif' # usa os parametros do raster modelo\n\n if BHCgeo_QGIS.dlg.checkBox_B.isChecked(): # <-----\n contMes = 0\n for mes in self.nomeMes:\n rasterSaidaB = self.diretorio+'b'+mes+'.tif'\n my_array_B = np.array(B_array[contMes])\n self.saida_BHC = self.array2raster(rasterModelo, rasterSaidaB, my_array_B)\n #iface.addRasterLayer(saida_BHC) #tem que adicionar .self para que funcione, pois iface foi referenciado la em cima\n contMes += 1\n #--- calculate a aprox size to put in the progress bar \n bit = (70-percent) / amount # max until this point - last point / len\n percent += bit # beggins the point at 10% \n self.setProgress(percent)\n #-------------------------------------------------------------\n \n percent = 80\n self.setProgress(percent)\n if BHCgeo_QGIS.dlg.checkBox_ETR.isChecked(): # <-----\n contMes = 0\n for mes in self.nomeMes:\n rasterSaidaETR = self.diretorio+'etr'+mes+'.tif'\n my_array_ETR = np.array(ETR_array[contMes])\n self.saida_ETR = self.array2raster(rasterModelo, rasterSaidaETR, my_array_ETR)\n #iface.addRasterLayer(saida_ETR) #tem que adicionar .self para que funcione, pois iface foi referenciado la em cima\n contMes += 1\n #--- calculate a aprox size to put in the progress bar \n bit = (80-percent) / amount # max until this point - last point / len\n percent += bit # beggins the point at 10% \n self.setProgress(percent)\n #-------------------------------------------------------------\n \n percent = 90\n self.setProgress(percent)\n if BHCgeo_QGIS.dlg.checkBox_ARM.isChecked(): # <-----\n contMes = 0\n for mes in self.nomeMes:\n rasterSaidaARM = self.diretorio+'arm'+mes+'.tif'\n my_array_ARM = np.array(ARM_array[contMes])\n self.saida_ARM = self.array2raster(rasterModelo, rasterSaidaARM, my_array_ARM)\n #iface.addRasterLayer(saida_ARM) #tem que adicionar .self para que funcione, pois iface foi referenciado la em cima\n contMes += 1\n #--- calculate a aprox size to put in the progress bar \n bit = (99-percent) / amount # max until this point - last point / len\n percent += bit # beggins the point at 10% \n self.setProgress(percent)\n #-------------------------------------------------------------\n \n percent = 99\n self.setProgress(percent)\n return True\n\n\n def finished(self, result):\n \"\"\"This function is called automatically when the task is completed and is\n called from the main thread so it is safe to interact with the GUI etc here\"\"\"\n if result is False:\n iface.messageBar().pushMessage(QCoreApplication.translate('Task message','Task was cancelled'))\n else:\n iface.messageBar().clearWidgets()\n for mes in self.nomeMes:\n if BHCgeo_QGIS.dlg.checkBox_B.isChecked(): \n iface.addRasterLayer(self.diretorio+'b'+mes+'.tif')\n if BHCgeo_QGIS.dlg.checkBox_ETR.isChecked():\n iface.addRasterLayer(self.diretorio+'etr'+mes+'.tif')\n if BHCgeo_QGIS.dlg.checkBox_ARM.isChecked(): \n iface.addRasterLayer(self.diretorio+'arm'+mes+'.tif')\n percent = 100\n self.setProgress(percent)\n iface.messageBar().pushMessage(QCoreApplication.translate('Task message','Complete'))\n #ProgessBar.btn_cancel.setEnabled(False)\n\n\n\nclass ProgessBar(QDialog):\n def __init__(self, parent=None):\n QDialog.__init__(self, parent)\n self.resize(310, 140)\n self.lbl_info = QLabel('Info:', self) \n self.lbl_info.move(40, 25) # label with Info\n self.edit_info = QLineEdit(self)\n self.edit_info.resize(170, 20)\n self.edit_info.move(100, 20) # Show changing messages\n self.prog = QProgressBar(self)\n self.prog.resize(230, 30)\n 
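# --- Illustrative aside (not part of the record above): the threading model this
# plugin relies on is the stock QgsTask pattern -- subclass QgsTask, do the heavy
# work in run() on a background thread, report via setProgress(), and touch the
# GUI only in finished(). A minimal, hedged sketch: QgsTask, QgsApplication,
# setProgress, isCanceled, run and finished are real QGIS 3.x APIs; the class
# name, the fake work loop and the 'demo' description are placeholders.
from qgis.core import QgsTask, QgsApplication

class MinimalTask(QgsTask):
    def run(self):
        # Background thread: never touch widgets here, only report progress 0..100.
        for step in range(100):
            if self.isCanceled():
                return False  # returning False marks the task as terminated
            self.setProgress(step + 1)
        return True

    def finished(self, result):
        # Main-thread callback: the safe place for GUI updates / messageBar pushes.
        print('done' if result else 'cancelled')

# QgsApplication.taskManager().addTask(MinimalTask('demo'))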
self.prog.move(40, 55) \n self.newTask('BHCgeo')\n btn_close = QPushButton(QCoreApplication.translate('Task message','Close'),self)\n btn_close.move(190, 100)\n btn_close.clicked.connect(self.close_win)\n # ProgessBar.btn_cancel = QPushButton('Cancel Task', self)\n # ProgessBar.btn_cancel.move(40, 100)\n # ProgessBar.btn_cancel.clicked.connect(self.cancelTask)\n\n\n def newTask(self, message_task_description):\n \"\"\"Create a task and add it to the Task Manager\"\"\"\n self.task = HeavyTask(message_task_description)\n #connect to signals from the background threads to perform gui operations\n #such as updating the progress bar\n self.task.begun.connect(lambda: self.edit_info.setText(QCoreApplication.translate(\"Task message\",\"Calculating...\")))\n self.task.progressChanged.connect(lambda: self.prog.setValue(self.task.progress()))\n self.task.progressChanged.connect(lambda: self.setProgressBarMessages(self.task.progress()))\n self.task.taskCompleted.connect(lambda: self.edit_info.setText(QCoreApplication.translate('Task message','Complete')))\n self.task.taskTerminated.connect(self.TaskCancelled)\n QgsApplication.taskManager().addTask(self.task)\n\n\n def TaskCancelled(self):\n self.prog.setValue(0)\n self.edit_info.setText(QCoreApplication.translate('Task message','Task Cancelled'))\n\n\n def close_win(self):\n self.close()\n\n\n def setProgressBarMessages(self, val):\n # --- Progress bar in the QGIS user messages (top)\n if val <= 30:\n message = QCoreApplication.translate(\"Task message\",\"Starting...\")\n iface.messageBar().pushMessage(message)\n elif val < 60:\n message = QCoreApplication.translate(\"Task message\",\"Calculating water balance...\")\n iface.messageBar().pushMessage(message)\n elif val < 100:\n message = QCoreApplication.translate(\"Task message\",\"Preparing final raster...\")\n iface.messageBar().pushMessage(message)\n # elif val == 100:\n # iface.messageBar().clearWidgets()\n\n\n # def cancelTask(self):\n # self.task.cancel()","repo_name":"romariocarvalhoneto/BHCgeo","sub_path":"BHCgeoQGIS.py","file_name":"BHCgeoQGIS.py","file_ext":"py","file_size_in_byte":35562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6395251214","text":"from pages.login_page import LoginPage\nimport allure\n\n@allure.suite(\"Test login forms\")\nclass TestLogin:\n @allure.description(\"test valid email\")\n def test_valid_email(self,driver):\n login_page = LoginPage(driver,'https://www.ukazka.ru/login.php')\n login_page.open()\n login_page.fill_fields_and_submit()\n\n @allure.description(\"test invalid email\")\n def test_invalid_email(self, driver):\n login_page = LoginPage(driver, 'https://www.ukazka.ru/login.php')\n login_page.open()\n login_page.fill_fields_and_submit_with_error()\n\n @allure.description(\"test error message\")\n def test_error(self, driver, error_text=\"Неправильно указан Е-Мейл или пароль!\"):\n login_page = LoginPage(driver, 'https://www.ukazka.ru/login.php')\n login_page.open()\n login_page.show_error()\n assert error_text == error_text\n\n\n\n \n\n\n\n\n\n\n\n","repo_name":"newbie1818/ukazka_ui_tests","sub_path":"tests/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22587450584","text":"from fractions import *\n\n\ndef get_num_of_sqrt_two_expressions_with_longer_numerator(n):\n if n < 1:\n raise ValueError\n num = 0\n cur = Fraction(3, 2)\n for i in 
range(n):\n num, cur = update_num_of_longer_numerators_and_cur_fraction(num, cur)\n return num\n\n\ndef update_num_of_longer_numerators_and_cur_fraction(num, cur):\n cur = 1 + 1 / (1 + cur)\n if is_numerator_longer(cur.numerator, cur.denominator):\n num += 1\n return num, cur\n\n\ndef is_numerator_longer(numerator, denominator):\n return len(str(numerator)) > len(str(denominator))\n\n\nif __name__ == \"__main__\":\n print(get_num_of_sqrt_two_expressions_with_longer_numerator(1000))\n","repo_name":"yehudav/Project-Euler","sub_path":"0057.py","file_name":"0057.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31040670676","text":"import argparse\n\ndef parse_input():\n parser = argparse.ArgumentParser(prog='Say hallo world')\n parser.add_argument('name', type=str, help='your name')\n args = parser.parse_args()\n return args.name\n\ndef get_greetings(name=''):\n return 'Nice to meet you {}!'.format(name) \n \nif __name__ == '__main__':\n name = parse_input()\n print(get_greetings(name))\n","repo_name":"lenarother/katas","sub_path":"01-say_hallo/say_hallo.py","file_name":"say_hallo.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70355723464","text":"from kafka import KafkaProducer\nimport json\nfrom data import get_data\nimport time\nimport logging\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\n\ndef json_serializer(data):\n return json.dumps(data).encode(\"utf-8\")\n\ndef get_partition(key, all, available):\n return 1 # 파티션을 특정할 수 있음\n\nproducer = KafkaProducer(\n bootstrap_servers=[\"192.168.0.2:9092\"],\n value_serializer=json_serializer,\n # compression_type='snappy',\n partitioner=get_partition\n)\n\n\nif __name__ == \"__main__\":\n try:\n while True:\n msg = get_data()\n logger.info(f\"send message: {msg}\")\n producer.send(\"mlops\", msg)\n time.sleep(1)\n except KeyboardInterrupt:\n logger.info(f\"stopped by a user\")\n finally:\n producer.close()\n","repo_name":"dream2globe/toy-mlops","sub_path":"app_kafka/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70984599304","text":"\"\"\"Helper functions for extracting values from request locations.\"\"\"\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\n\nfrom werkzeug.datastructures import MultiDict\n\nfrom flask_jeroboam._utils import is_sequence_field\nfrom flask_jeroboam.wrapper import current_app\n\n\ndef _extract_scalar(\n *,\n source: Union[MultiDict, dict],\n name: Optional[str],\n alias: Optional[str],\n **_kwargs,\n):\n \"\"\"Extract a scalar value from a source.\"\"\"\n return source.get(alias, source.get(name))\n\n\ndef _extract_sequence(\n *, source: MultiDict, name: Optional[str], alias: Optional[str], **_kwargs\n) -> List:\n \"\"\"Extract a Sequence value from a source.\"\"\"\n _values = source.getlist(alias)\n if len(_values) == 0:\n _values = source.getlist(name)\n return _values\n\n\ndef _extract_sequence_with_key_transformer(\n *, source: MultiDict, name: Optional[str], alias: Optional[str], **_kwargs\n):\n 
\"\"\"Apply the key transformer to the source.\"\"\"\n transformed_source = current_app.query_string_key_transformer(\n current_app, source.to_dict()\n )\n return _extract_scalar(source=transformed_source, name=name, alias=alias)\n\n\ndef _undirected_extraction(\n *,\n field,\n source,\n alias: str,\n name: str,\n has_key_transformer: bool,\n **_kwargs,\n):\n if is_sequence_field(field):\n values = _extract_sequence(source=source, name=name, alias=alias)\n if len(values) == 0 and has_key_transformer:\n values = _extract_sequence_with_key_transformer(\n source=source, name=name, alias=alias\n )\n else:\n values = _extract_scalar(source=source, name=name, alias=alias)\n return values\n\n\ndef _extract_subfields(\n *,\n source: MultiDict,\n fields: Dict,\n **_kwargs,\n) -> Dict:\n \"\"\"Extract a Sequence from subfields.\"\"\"\n has_key_transformer = (\n getattr(current_app, \"query_string_key_transformer\", False) is not None\n )\n return {\n field_name: _undirected_extraction(\n field=subfield,\n source=source,\n name=field_name,\n alias=subfield.alias,\n has_key_transformer=has_key_transformer,\n )\n for field_name, subfield in fields.items()\n }\n","repo_name":"jcbianic/flask-jeroboam","sub_path":"flask_jeroboam/view_arguments/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"34051138446","text":"#!/usr/bin/env python3\n\"\"\"4096 attempts to solve HW01 problem 7.\"\"\"\n\nfrom itertools import product\nfrom subprocess import Popen, PIPE\nfrom concurrent.futures import ThreadPoolExecutor\n\n\noctave = \"\"\"\nc = [%d; %d; %d; %d; %d; %d; 1; 1; 1];\nA = [1, 1, 1, 0, 0, 0, 0, 0, 0;\n 0, 0, 0, 1, 1, 1, 0, 0, 0;\n 0, 0, 0, 0, 0, 0, 1, 1, 1;\n 1, 0, 0, 1, 0, 0, 1, 0, 0;\n 0, 1, 0, 0, 1, 0, 0, 1, 0;\n 0, 0, 1, 0, 0, 1, 0, 0, 1;\n 1, 0, 0, 1, 0, 0, 0, 0, 0;\n 0, 1, 0, 0, 1, 0, 0, 0, 0;\n 0, 0, 1, 0, 0, 1, 0, 0, 0];\nb = [2; 1; 3; 2; 2; 2; 1; 1; 1];\nlb = [];\nub = [];\nctype = \"UUUSSSLLL\";\nvartype = \"CCCCCCCCC\";\nsense = 1;\n[x, f, status, extra] = glpk(c, A, b, lb, ub, ctype, vartype, sense)\n\"\"\"\n\n\ndef do_tup(tup):\n print(tup)\n script = octave % tup\n proc = Popen('octave-cli', stdin=PIPE, stdout=PIPE)\n proc.stdin.write(script.encode('utf8'))\n proc.stdin.close()\n output = proc.stdout.readlines()\n try:\n [int(x.strip()) for x in output[2:11]] # raise if not int!\n except:\n print(\"%r works!\")\n\n\ndef main():\n pool = ThreadPoolExecutor(8)\n futures = [pool.submit(do_tup, tup) for tup in product(*([[1,2,3,4]]*6))]\n for future in futures:\n future.result()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"brenns10/eecs477","sub_path":"hw01/p7.py","file_name":"p7.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34755827070","text":"import unicodedata\nimport functools\n# List Of English Stop Words\n_WORD_MIN_LENGTH = 3\n#Words that are not indexed\n_STOP_WORDS = frozenset([\n'a', 'about', 'above', 'above', 'across', 'after', 'afterwards', 'again', \n'against', 'all', 'almost', 'alone', 'along', 'already', 'also','although',\n'always','am','among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another',\n'any','anyhow','anyone','anything','anyway', 'anywhere', 'are', 'around', 'as',\n'at', 'back','be','became', 'because','become','becomes', 'becoming', 'been', \n'before', 'beforehand', 'behind', 'being', 'below', 'beside', 
'besides', \n'between', 'beyond', 'bill', 'both', 'bottom','but', 'by', 'call', 'can', \n'cannot', 'cant', 'co', 'con', 'could', 'couldnt', 'cry', 'de', 'describe', \n'detail', 'do', 'done', 'down', 'due', 'during', 'each', 'eg', 'eight', \n'either', 'eleven','else', 'elsewhere', 'empty', 'enough', 'etc', 'even', \n'ever', 'every', 'everyone', 'everything', 'everywhere', 'except', 'few', \n'fifteen', 'fify', 'fill', 'find', 'fire', 'first', 'five', 'for', 'former', \n'formerly', 'forty', 'found', 'four', 'from', 'front', 'full', 'further', 'get',\n'give', 'go', 'had', 'has', 'hasnt', 'have', 'he', 'hence', 'her', 'here', \n'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', \n'himself', 'his', 'how', 'however', 'hundred', 'ie', 'if', 'in', 'inc', \n'indeed', 'interest', 'into', 'is', 'it', 'its', 'itself', 'keep', 'last', \n'latter', 'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may', 'me', \n'meanwhile', 'might', 'mill', 'mine', 'more', 'moreover', 'most', 'mostly', \n'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither', 'never', \n'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone', 'nor', 'not', \n'nothing', 'now', 'nowhere', 'of', 'off', 'often', 'on', 'once', 'one', 'only',\n'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours', 'ourselves', 'out',\n'over', 'own','part', 'per', 'perhaps', 'please', 'put', 'rather', 're', 'same',\n'see', 'seem', 'seemed', 'seeming', 'seems', 'serious', 'several', 'she', \n'should', 'show', 'side', 'since', 'sincere', 'six', 'sixty', 'so', 'some', \n'somehow', 'someone', 'something', 'sometime', 'sometimes', 'somewhere', \n'still', 'such', 'system', 'take', 'ten', 'than', 'that', 'the', 'their', \n'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', \n'therefore', 'therein', 'thereupon', 'these', 'they', 'thickv', 'thin', 'third',\n'this', 'those', 'though', 'three', 'through', 'throughout', 'thru', 'thus', \n'to', 'together', 'too', 'top', 'toward', 'towards', 'twelve', 'twenty', 'two', \n'un', 'under', 'until', 'up', 'upon', 'us', 'very', 'via', 'was', 'we', 'well', \n'were', 'what', 'whatever', 'when', 'whence', 'whenever', 'where', 'whereafter',\n'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', \n'while', 'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'will', \n'with', 'within', 'without', 'would', 'yet', 'you', 'your', 'yours', 'yourself',\n'yourselves', 'the'])\n\ndef word_split(text):\n \"\"\"\n Split a text in words. Returns a list of tuple that contains\n (word, location) location is the starting byte position of the word.\n \"\"\"\n word_list = []\n wcurrent = []\n windex = None\n\n for i, c in enumerate(text):\n if c.isalnum():\n wcurrent.append(c)\n windex = i\n elif wcurrent:\n word = u''.join(wcurrent)\n word_list.append((windex - len(word) + 1, word))\n wcurrent = []\n\n if wcurrent:\n word = u''.join(wcurrent)\n word_list.append((windex - len(word) + 1, word))\n\n return word_list\n\ndef words_cleanup(words):\n \"\"\"\n Remove words with length less then a minimum and stopwords.\n \"\"\"\n cleaned_words = []\n for index, word in words:\n if len(word) < _WORD_MIN_LENGTH or word in _STOP_WORDS:\n continue\n cleaned_words.append((index, word))\n return cleaned_words\n\ndef words_normalize(words):\n \"\"\"\n Do a normalization process on words. 
In this case it is just to lower()\"\"\"\n normalized_words = []\n for index, word in words:\n wnormalized = word.lower()\n normalized_words.append((index, wnormalized))\n return normalized_words\n\ndef word_index(text):\n \"\"\"A helper method to process a text.It calls word split, normalize and cleanup.\"\"\"\n words = word_split(text)\n words = words_normalize(words)\n words = words_cleanup(words)\n return words\n\ndef inverted_index(text):\n \"\"\"\n Create an Inverted-Index of the specified text document-{word:[location]}\"\"\"\n inverted = {}\n for index, word in word_index(text):\n locations = inverted.setdefault(word, [])\n locations.append(index)\n return inverted\n\ndef inverted_index_add(inverted, doc_id, doc_index):\n \"\"\"\n Add Invertd-Index doc_index of the document doc_id to the \n Multi-Document Inverted-Index (inverted), \n using doc_id as document identifier.\n {word:{doc_id:[locations]}}\n \"\"\"\n for word, locations in doc_index.items():\n indices = inverted.setdefault(word, {})\n indices[doc_id] = locations\n return inverted\n\ndef search(inverted, query):\n \"\"\"\n Returns a set of documents id that contains all the words in the query.\n \"\"\"\n words = [word for _, word in word_index(query) if word in inverted]\n results = [set(inverted[word].keys()) for word in words]\n return functools.reduce(lambda x, y: x & y, results) if results else []\n\nif __name__ == '__main__':\n doc1 = \"\"\"\nJava was conceived by James Gosling, Patrick Naughton, Chris Warth, Ed Frank, and Mike\nSheridan at Sun Microsystems, Inc. in 1991. It took 18 months to develop the first working\nversion. This language was initially called “Oak,” but was renamed “Java” in 1995. Between\nthe initial implementation of Oak in the fall of 1992 and the public announcement of Java in\nthe spring of 1995, many more people contributed to the design and evolution of the language.\nBill Joy, Arthur van Hoff, Jonathan Payne, Frank Yellin, and Tim Lindholm were key\ncontributors to the maturing of the original prototype.\nSomewhat surprisingly, the original impetus for Java was not the Internet! 
Instead, the\nprimary motivation was the need for a platform-independent (that is, architecture-neutral)\nlanguage that could be used to create software to be embedded in various consumer electronic\ndevices, such as microwave ovens and remote controls.\n\"\"\"\n\n doc2 = \"\"\"\nThe fifth edition of West Coast Green, a conference focusing on \"green\" home \ninnovations and products, rolled into San Francisco's Fort Mason last week \nintent, per usual, on making our living spaces more environmentally friendly \n- one used-tire house at a time.\nTo that end, there were presentations on topics such as water efficiency and \nthe burgeoning future of Net Zero-rated buildings that consume no energy and \nproduce no carbon emissions.\n\"\"\"\n\n # Build Inverted-Index for documents\n inverted = {}\n documents = {'doc1':doc1, 'doc2':doc2}\n for doc_id, text in documents.items():\n doc_index = inverted_index(text)\n inverted_index_add(inverted, doc_id, doc_index)\n\n # Print Inverted-Index\n for word, doc_locations in inverted.items():\n print (word, doc_locations)\n # Search something and print results\n query = input(\"Enter a query word to be searched: \")\n print(inverted[query])\n \n","repo_name":"riyaminiarora/python","sub_path":"inverted index.py","file_name":"inverted index.py","file_ext":"py","file_size_in_byte":7433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25381642440","text":"import tarfile\nimport os\nimport glob\nimport sys\nimport re\nimport shutil\nimport time\nfrom datetime import datetime\nimport math\n\ndef span_c(str, c):\n\tnewstr=''+str+''\n\treturn newstr\n\ndef para_c(str, c):\n\tif c:\n\t\tnewpara='

<p class=\"'+c+'\">    '+str+'</p>    \\n'\n\telse:\n\t\tnewpara='<p>    '+str+'</p>    \\n'\n\treturn newpara\n\ndef head_c(str):\n    newhead='<head>\\n'+str+'\\n</head>'\n    return newhead\n\ndef body_c(str):\n    newbody='<body>\\n'+str+'\\n</body>'\n    return newbody\n\ndef html_c(str):\n\tnewhtml='<html>\\n'+str+'\\n</html>'\n\treturn newhtml\n\nfrom xml.etree import ElementTree\nfrom xml.dom import minidom\n\ndef prettify(elem):\n    \"\"\"Return a pretty-printed XML string for the Element.\n    \"\"\"\n    rough_string = ElementTree.tostring(elem, 'utf-8')\n    reparsed = minidom.parseString(rough_string)\n    return reparsed.toprettyxml(indent=\" \")\n\ndef time_convert(msg_time, time_dif):\n    mmatch=re.match(r'(.{15})(.{7})', msg_time)\n    mtime=datetime.strptime(mmatch.group(1), \"%b %d %H:%M:%S\")\n    mtime_tail=mmatch.group(2)\n    mtime=str(datetime.fromtimestamp(mtime.timestamp()+time_dif))\n    newtime=mtime+mtime_tail\n    return newtime\n\n\ndef time_dif(gen_t, curtime):\n\tcurtime=datetime.strptime(curtime, \"%Y-%m-%d %H:%M:%S\")\n\tgen_t=datetime.strptime(gen_t, \"%Y-%m-%d %H:%M:%S\")\n\ttime_dif=curtime.timestamp()-gen_t.timestamp()\n\treturn time_dif\n\ndef current_time(curtime):\n\ttry:\n\t\ttuple_t=time.strptime(curtime, \"%m/%d/%Y %I:%M:%S %p\")\n\texcept ValueError as e:\n\t\ttuple_t=time.strptime(curtime, \"%m/%d/%Y %H:%M:%S\")\n\tnewtime=time.strftime(\"%Y-%m-%d %H:%M:%S\", tuple_t)\n\treturn newtime\n\ndef generate_showtech(prt_folder):\n\tfor each in os.listdir(prt_folder):\n\t\tif re.match(r'show\\-output.*', each):\n\t\t\tos.rename(prt_folder+'/'+each, prt_folder+'/tech.log')\n\t\telif re.match(r'description.*', each):\n\t\t\tos.rename(prt_folder+'/'+each, prt_folder+'/description.log')\n\n\ndef generate_output(prt_folder, hours_d, time_seg, start_time):\n\tif hours_d==time_seg:\n\t\tmax=0\n\telse:\n\t\tmax=hours_d-time_seg\n\n\twith open (prt_folder+'/output.html', \"w\") as newf:\n\t\twith open ('output.html', \"r\") as f:\n\t\t\tfor line in f:\n\t\t\t\tif re.search('name\\=\\\"start\\_time.*\\>', line):\n\t\t\t\t\tnewf.write(re.sub('name\\=\\\"start\\_time.*\\>', 'name=\"start_time\" min=\"0\" max=\"'+str(max)+'\" step=\"0.01\" value=\"'+str(start_time)+'\">', line))\n\t\t\t\telif re.search('This\\ PRT.*', line):\n\t\t\t\t\tnewf.write(re.sub('This\\ PRT.*\\<', 'This PRT file contains about '+str(hours_d)+'-hour log messages, to reduce overall network traffic volume, the green bar would contain about '+str(time_seg)+'-hour log messages.<', line))\n\t\t\t\telif re.search('\\\"prt\\_folder\\\"\\ value\\=\\\"', line):\n\t\t\t\t\tnewf.write(re.sub('\\\"prt\\_folder\\\"\\ value\\=\\\"', '\"prt_folder\" value=\"'+prt_folder, line))\n\t\t\t\telif re.search('iframe', line):\n\t\t\t\t\tnewf.write(re.sub('iframe\\ src\\=\\\"', 'iframe src=\"/'+prt_folder, line))\n\t\t\t\telse:\n\t\t\t\t\tnewf.write(line)\n\ndef generate_fixed_allmsgs(prt_folder, all_messages, start_time, fixed_size, file_size, hours_d, time_seg):\n\tif hours_d==time_seg:\n\t\tshutil.copy(prt_folder+'/'+all_messages, prt_folder+'/all_messages_r')\n\telse:\n\t\tstart_pointer=math.ceil(start_time*file_size/hours_d)\n\t\twith open(prt_folder+'/all_messages_r', 'w') as newf:\n\t\t\twith open (prt_folder+'/'+all_messages, 'r', errors = 'ignore') as f:\n\t\t\t\tf.seek(start_pointer, 0)\n\t\t\t\tline=f.readline()\n\t\t\t\tline=f.readline()\n\t\t\t\twhile line is not None and line !='':\n\t\t\t\t\tnewf.write(line)\n\t\t\t\t\tline=f.readline()\n\t\t\t\t\tif (f.tell()-start_pointer)>=fixed_size:\n\t\t\t\t\t\tline=f.readline()\n\t\t\t\t\t\tnewf.write(line)\n\t\t\t\t\t\tbreak\n\n\ndef generate_all_sip_msgs(prt_folder, all_messages, time_dif):\n\tstyle=\"\"\"<style>\n\t</style>\"\"\"\n\n\tdoctype='<!DOCTYPE html>'\n\n\twith open(prt_folder+'/'+all_messages, \"r\", errors = 'ignore') as f:\n\t\tallmsg=''\n\t\tsipmsg_flag=0\n\t\tsipmsg=''\n\t\tsipmsg_send=0\n\t\twith open(prt_folder+'/allmsgs.html', \"w\") as allmsgs:\n\t\t\twith open(prt_folder+'/sipmsgs.html', \"w\") as sipmsgs:\n\t\t\t\tfor line in f:\n\t\t\t\t\t# line=line.decode('utf-8')\n\t\t\t\t\tline=line.replace('&','&amp;')\n\t\t\t\t\tline=line.replace('<','&lt;')\n\t\t\t\t\tline=line.replace('>','&gt;')\n\t\t\t\t\tnewline=''\n\t\t\t\t\tif sipmsg_flag==0:\n\t\t\t\t\t\tmsg=re.match(r'(\\d{4}\\ )(\\D{3})\\ (.{22})(.*)', line, re.M|re.I)\n\t\t\t\t\t\tif msg:\n\t\t\t\t\t\t\tnewtime=time_convert(msg.group(3), time_dif)\n\t\t\t\t\t\t\tnewline=msg.group(1)+' '+newtime+' '+msg.group(2)+msg.group(4)\n\t\t\t\t\t\t\tif msg.group(2)=='ERR':\n\t\t\t\t\t\t\t\tnewline=para_c(msg.group(1)+' '+span_c(newtime, 'd')+' '+span_c(msg.group(2)+' '+msg.group(4), 'e'), '')\n\t\t\t\t\t\t\telif msg.group(2)=='WRN':\n\t\t\t\t\t\t\t\tnewline=para_c(msg.group(1)+' '+span_c(newtime, 'd')+' '+span_c(msg.group(2)+' '+msg.group(4), 'w'), '')\n\t\t\t\t\t\t\telif re.search(r'\\={5}\\>.*SIP\\ MSG\\:\\:', msg.group(4), re.M):\n\t\t\t\t\t\t\t\tnewline=para_c(msg.group(1)+' '+span_c(newtime, 'd')+' '+span_c(msg.group(2)+' '+msg.group(4), 's'), '')\n\t\t\t\t\t\t\t\tsipmsg+=newline\n\t\t\t\t\t\t\t\tsipmsg_flag=1\n\t\t\t\t\t\t\t\tsipmsg_send=1\n\t\t\t\t\t\t\telif re.search(r'\\<\\={5}.*SIP\\ MSG\\:\\:', msg.group(4), re.M):\n\t\t\t\t\t\t\t\tnewline=para_c(msg.group(1)+' '+span_c(newtime, 'd')+' '+span_c(msg.group(2)+' '+msg.group(4), 'r'), '')\n\t\t\t\t\t\t\t\tsipmsg+=newline\n\t\t\t\t\t\t\t\tsipmsg_flag=1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnewline=para_c(msg.group(1)+' '+span_c(newtime, 'd')+' '+msg.group(2)+' '+msg.group(4), '')\n\t\t\t\t\telse:\n\t\t\t\t\t\tif not re.search(r'\\:\\:End\\-Of\\-Sip\\-Message\\:\\:', line):\n\t\t\t\t\t\t\tif sipmsg_send:\n\t\t\t\t\t\t\t\tnewline=para_c(span_c(line.strip(), 's'), 'sp')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnewline=para_c(span_c(line.strip(), 'r'), 'sp')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsipend=re.match(r'(\\d{4}\\ )(\\D{3})\\ (.{22})(.*)', line, re.M|re.I)\n\t\t\t\t\t\t\tnewtime=time_convert(sipend.group(3), time_dif)\n\t\t\t\t\t\t\tif sipmsg_send:\n\t\t\t\t\t\t\t\tnewline=para_c(sipend.group(1)+' '+span_c(newtime, 'd')+' '+span_c(sipend.group(2)+' '+sipend.group(4), 's'), '')\n\t\t\t\t\t\t\t\tsipmsg_send=0\n\t\t\t\t\t\t\t\tsipmsg_flag=0\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnewline=para_c(sipend.group(1)+' '+span_c(newtime, 'd')+' '+span_c(sipend.group(2)+' '+sipend.group(4), 'r'), '')\n\t\t\t\t\t\t\t\tsipmsg_flag=0\n\t\t\t\t\t\tsipmsg+=newline\n\t\t\t\t\tallmsg+=newline\n\t\t\t\tsipmsgs.write(doctype+html_c(head_c(style)+'\\n'+body_c(sipmsg)))\n\t\t\tallmsgs.write(doctype+html_c(head_c(style)+'\\n'+body_c(allmsg)))\n\ndef main():\n\tprt_folder = sys.argv[1]\n\tstart_time = float(sys.argv[2])\n\t# stop_time = sys.argv[3]\n\twith open(prt_folder+'/argv.txt', \"r\") as f:\n\t\tline=f.readline()\n\t\tline=''.join(line)\n\t\told_t=line.split(\",\")[1]\n\t\tgen_t=line.split(\",\")[2]\n\t\tcurtime=line.split(\",\")[3]\n\t\tfile_size=int(line.split(\",\")[4])\n\t\thours_d=float(line.split(\",\")[5])\n\t\tfixed_size=int(line.split(\",\")[6])\n\t\ttime_seg=float(line.split(\",\")[7])\n\n\tprint(prt_folder,start_time,old_t,gen_t,curtime)\n\ttime_diff=time_dif(gen_t, curtime)\n\n\tgenerate_showtech(prt_folder)\n\tgenerate_fixed_allmsgs(prt_folder, \"all_messages\", start_time, fixed_size, file_size, hours_d, time_seg)\n\tgenerate_all_sip_msgs(prt_folder, \"all_messages_r\", 
time_diff)\n\tgenerate_output(prt_folder, hours_d, time_seg, start_time)\n\nif __name__ == '__main__':\n main()\n","repo_name":"yozhang3/PRT","sub_path":"main_b.py","file_name":"main_b.py","file_ext":"py","file_size_in_byte":7321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22360944510","text":"# 연결 요소의 개수\nimport sys\nfrom collections import deque\n\nN, M = map(int, sys.stdin.readline().split())\ngraph = [[0] * (N+1) for _ in range(N+1)]\nvisited = [0] * (N+1)\n\nfor _ in range(M):\n u, v = map(int, sys.stdin.readline().split())\n graph[u][v] = 1\n graph[v][u] = 1\n\ndef bfs(node):\n Q = deque([])\n Q.append(node)\n visited[node] = 1\n while Q:\n n = Q.popleft()\n for i in range(1, N+1):\n if visited[i] == 0 and graph[n][i] == 1:\n Q.append(i)\n visited[i] = 1\n\nans = 0 \nfor i in range(1, N+1):\n if visited[i] == 0:\n bfs(i)\n ans += 1\nprint(ans)","repo_name":"zpqmdh/BOJ","sub_path":"graph/11724.py","file_name":"11724.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22361089770","text":"# 시리얼 번호\nimport sys\ninput = sys.stdin.readline\n\nN = int(input())\nwords = [input().rstrip() for _ in range(N)]\ndef sum_num(word):\n res = 0\n for i in word:\n if '0' <= i <= '9':\n res += int(i)\n return res\nwords.sort(key = lambda x:(len(x), sum_num(x), x))\n\nfor w in words:\n print(w)","repo_name":"zpqmdh/BOJ","sub_path":"implementation/1431.py","file_name":"1431.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74530393864","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom os.path import join, dirname, abspath, exists\n\nsys_path = dirname(dirname(abspath(__file__)))\nif sys_path not in sys.path:\n sys.path.insert(0, sys_path)\n\nparent_sys_path = dirname(sys_path)\nif parent_sys_path not in sys.path:\n sys.path.insert(0, parent_sys_path)\n\nparent_sys_path = dirname(parent_sys_path)\nif parent_sys_path not in sys.path:\n sys.path.insert(0, parent_sys_path)\n\nimport utils.config_loader as config\nfrom utils.config_loader import logger, path_parser, config_meta, meta_model_name\nfrom os import listdir\nfrom os.path import isfile, isdir, join, dirname, abspath, exists\n\nimport copy\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport utils.tools as tools\n\nfrom argparse import ArgumentParser\nimport os\nimport io\nfrom tqdm import tqdm\nimport summ.rank_sent as rank_sent\nimport summ.select_sent as select_sent\nimport ir.ir_tools as ir_tools\nfrom bert_rr.data_pipe_cluster import Eli5TdqfsQSDataLoader, load_retrieved_sentences, load_retrieved_sentences_and_sids\nimport bert_rr.rr_config as rr_config\nimport shutil\nimport summ.compute_rouge as rouge\n\nfrom querysum.unilm_utils.unilm_eval import UniLMEval\nfrom querysum.unilm_utils.unilm_input import UniLMInput\n\nimport tools.general_tools as general_tools\n\n\"\"\"\n This module is for TDQFS dataset. 
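# The BOJ 11724 solution above allocates an O(N^2) adjacency matrix and scans
# a full row per dequeued node; a hedged alternative sketch with adjacency
# lists keeps each scan proportional to the node's degree. Input values are
# hard-coded for the demo.
from collections import deque

def count_components(n, edges):
    adj = [[] for _ in range(n + 1)]
    for u, v in edges:
        adj[u].append(v)
        adj[v].append(u)
    seen = [False] * (n + 1)
    components = 0
    for start in range(1, n + 1):
        if seen[start]:
            continue
        components += 1
        seen[start] = True
        q = deque([start])
        while q:
            node = q.popleft()
            for nxt in adj[node]:
                if not seen[nxt]:
                    seen[nxt] = True
                    q.append(nxt)
    return components

print(count_components(6, [(1, 2), (2, 5), (5, 1), (3, 4), (4, 6)]))  # 2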
It builds the following pipeline:\n\n rel_scores [compute, dump] =>\n rr_rank [load rel_scores, compute, dump]=>\n rr_records [load rr_rank, compute, dump]\n\n For the same IR records and query type, rel_scores and rr_rank only need to be processed once.\n\n rr_records can be generated with different threhold, e.g., confidence-based (CONF_THRESHOLD_RR) or TopK-based (TOP_NUM_RR).\n\n You can tune() to produce TopK Recall Curve, which can be used to evaluate Semantic Matching Model.\n\n\"\"\"\n\nrel_scores_dp = join(path_parser.graph_rel_scores, rr_config.RELEVANCE_SCORE_DIR_NAME)\n\nuse_tdqfs = 'tdqfs' in rr_config.IR_RECORDS_DIR_NAME\nassert use_tdqfs\n\nsentence_dp = path_parser.data_tdqfs_sentences\nquery_fp = path_parser.data_tdqfs_queries\ntdqfs_summary_target_dp = path_parser.data_tdqfs_summary_targets\n\ntest_cid_query_dicts = general_tools.build_tdqfs_cid_query_dicts(query_fp=query_fp, proc=True)\ncids = [cq_dict['cid'] for cq_dict in test_cid_query_dicts]\n\nrank_dp = join(path_parser.summary_rank, rr_config.RR_RANK_DIR_NAME_BERT)\nir_rec_dp = join(path_parser.summary_rank, rr_config.IR_RECORDS_DIR_NAME)\n\n\ndef init():\n # parse args\n parser = ArgumentParser()\n parser.add_argument('n_devices',\n nargs='?',\n default=4,\n help='num of devices on which model will be running on')\n\n args = parser.parse_args()\n all_device_ids = [0, 1, 2, 3, 4, 5, 6, 7]\n device = all_device_ids[:int(args.n_devices)]\n # device = [int(d) for d in args.n_devices]\n config_meta['device'] = device\n\n if not torch.cuda.is_available():\n placement = 'cpu'\n logger.info('path mode: {0}, placement: {1}'.format(config.path_type, placement))\n else:\n if len(device) == 1:\n placement = 'single'\n torch.cuda.set_device(device[0])\n elif config_meta['auto_parallel']:\n placement = 'auto'\n else:\n placement = 'manual'\n\n logger.info('path mode: {0}, placement: {1}, n_devices: {2}'.format(config.path_type, placement, args.n_devices))\n config_meta['placement'] = placement\n\n\ndef _place_model(model):\n # epoch, model, tokenizer, scores = load_checkpoint()\n if config_meta['placement'] == 'auto':\n model = nn.DataParallel(model, device_ids=config_meta['device'])\n logger.info('[place_model] Parallel Data to devices: {}'.format(config_meta['device']))\n\n if config_meta['placement'] in ('auto', 'single'):\n model.cuda()\n\n model.eval()\n return model\n\n\ndef _dump(model, cluster_loader, dump_dp):\n doc_rel_scores = []\n for _, batch in enumerate(cluster_loader):\n feed_dict = copy.deepcopy(batch)\n\n for (k, v) in feed_dict.items():\n with torch.no_grad():\n feed_dict[k] = Variable(v, requires_grad=False)\n\n n_sents, max_nt = feed_dict['token_ids'].size()\n # pred: (batch * max_ns_doc) * 2\n pred = model(feed_dict['token_ids'],\n feed_dict['seg_ids'],\n feed_dict['token_masks'])\n\n # logger.info(f'pred: {pred}')\n # logger.info(f'pred[0]: {pred[0]}')\n if type(pred) is tuple: # BertForSequenceClassification returns tuple\n pred = pred[0]\n\n n_cls = pred.size()[-1]\n if n_cls == 2:\n pred = F.softmax(pred, dim=-1)[:, 1]\n elif n_cls == 1:\n pred = pred.squeeze(-1)\n else:\n raise ValueError('Invalid n_cls: {}'.format(n_cls))\n\n rel_scores = pred.cpu().detach().numpy() # d_batch,\n logger.info('[_dump] rel_scores: {}'.format(rel_scores.shape))\n\n doc_rel_scores.append(rel_scores[:n_sents])\n\n rel_scores = np.concatenate(doc_rel_scores)\n\n dump_fp = join(dump_dp, cluster_loader.cid)\n\n tools.save_obj(obj=rel_scores, fp=dump_fp)\n logger.info('[_dump] dumping ranking file to: 
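# Minimal sketch of the score extraction in _dump above: for a two-class
# classifier, the positive-class softmax probability is used as the sentence
# relevance score (the one-logit branch just squeezes instead). The logits
# here are invented.
import torch
import torch.nn.functional as F

logits = torch.tensor([[0.2, 1.5], [2.0, -1.0]])  # (n_sents, n_cls=2)
rel_scores = F.softmax(logits, dim=-1)[:, 1]      # P(relevant) per sentence
print(rel_scores.numpy())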
{0}'.format(dump_fp))\n\n\ndef get_data_loader_gen(ir_rec_dp):\n data_gen = Eli5TdqfsQSDataLoader(test_cid_query_dicts=test_cid_query_dicts,\n query_type=rr_config.QUERY_TYPE,\n retrieve_dp=ir_rec_dp,\n with_sub=rr_config.WITH_SUB)\n return data_gen\n\n\ndef dump_rel_scores():\n if exists(rel_scores_dp):\n raise ValueError('rel_scores_dp exists: {}'.format(rel_scores_dp))\n os.mkdir(rel_scores_dp)\n\n ir_rec_dp = join(path_parser.summary_rank, rr_config.IR_RECORDS_DIR_NAME)\n\n model = _place_model(model=config.bert_model)\n data_loader_generator = get_data_loader_gen(ir_rec_dp)\n for cluster_loader in data_loader_generator:\n if config.meta_model_name in ('bert_rr'):\n _dump(model, cluster_loader=cluster_loader, dump_dp=rel_scores_dp)\n else:\n raise ValueError('Invalid meta_model_name: {}'.format(config.meta_model_name))\n\n\ndef load_rel_scores(cid, rel_scores_dp):\n rel_scores_fp = join(rel_scores_dp, cid)\n return tools.load_obj(rel_scores_fp)\n\n\ndef rel_scores2rank():\n if exists(rank_dp):\n raise ValueError('rank_dp exists: {}'.format(rank_dp))\n os.mkdir(rank_dp)\n\n for cid in tqdm(cids):\n rel_scores = load_rel_scores(cid=cid, rel_scores_dp=rel_scores_dp)\n sent_ids = np.argsort(rel_scores)[::-1].tolist()\n\n sid_score_list = []\n for sid in sent_ids:\n sid_score = ('0_{}'.format(sid), rel_scores[sid])\n sid_score_list.append(sid_score)\n\n original_sents, _ = load_retrieved_sentences(retrieved_dp=ir_rec_dp, cid=cid)\n rank_records = rank_sent.get_rank_records(sid_score_list, sents=original_sents)\n\n n_sents = rank_sent.dump_rank_records(rank_records=rank_records, out_fp=join(rank_dp, cid), with_rank_idx=False)\n logger.info(f'Dump {n_sents} ranking records')\n\n\ndef rel_scores2rank_with_positional_sid():\n \"\"\"\n Sentences in the output rank file come with their original positions.\n\n For test UniLM model with positional ordered inputs.\n \"\"\"\n if exists(rank_dp):\n raise ValueError(f'rank_dp exists: {rank_dp}')\n os.mkdir(rank_dp)\n\n for cid in tqdm(cids):\n rel_scores = load_rel_scores(cid=cid, rel_scores_dp=rel_scores_dp)\n ranks = np.argsort(rel_scores)[::-1].tolist()\n\n rank_score_list = [(rank, rel_scores[rank]) for rank in ranks]\n\n original_sents, _ = load_retrieved_sentences_and_sids(retrieved_dp=ir_rec_dp, cid=cid)\n\n rank_records = rank_sent.get_rank_records_with_positional_sid(rank_score_list, sents=original_sents)\n\n n_sents = rank_sent.dump_rank_records(rank_records=rank_records, out_fp=join(rank_dp, cid), with_rank_idx=False)\n logger.info(f'Dump {n_sents} ranking records')\n\n\ndef tune():\n \"\"\"\n Tune RR confidence / compression rate / topK\n based on Recall Rouge 2.\n :return:\n \"\"\"\n FILTER = 'topK'\n if FILTER in ('conf', 'comp'):\n tune_range = np.arange(0.05, 1.05, 0.05)\n else: # topK\n interval = 10\n if rr_config.ir_config.FILTER == 'topK':\n end = rr_config.ir_config.FILTER_VAR + interval\n else:\n end = 200 + interval\n tune_range = range(interval, end, interval)\n\n rr_tune_dp = join(path_parser.summary_rank, rr_config.RR_TUNE_DIR_NAME_BERT)\n rr_tune_result_fp = join(path_parser.tune, rr_config.RR_TUNE_DIR_NAME_BERT)\n with open(rr_tune_result_fp, mode='a', encoding='utf-8') as out_f:\n headline = 'Filter\\tRecall\\tF1\\n'\n out_f.write(headline)\n\n for filter_var in tune_range:\n if exists(rr_tune_dp): # remove previous output\n shutil.rmtree(rr_tune_dp)\n os.mkdir(rr_tune_dp)\n\n for cid in tqdm(cids):\n retrieval_params = {\n 'model_name': rr_config.RR_MODEL_NAME_BERT,\n 'cid': cid,\n 'filter_var': filter_var,\n 'filter': 
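# Hedged illustration of the ranking step in rel_scores2rank above:
# np.argsort returns ascending order, so the [::-1] slice flips it to
# highest-score-first; sentence ids are then prefixed with the single-doc
# marker '0_'. Scores are invented.
import numpy as np

rel_scores = np.array([0.1, 0.9, 0.4])
sent_ids = np.argsort(rel_scores)[::-1].tolist()            # [1, 2, 0]
sid_score_list = [('0_{}'.format(s), rel_scores[s]) for s in sent_ids]
print(sid_score_list)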
FILTER,\n 'deduplicate': None,\n 'min_ns': rr_config.RR_MIN_NS\n }\n\n if meta_model_name.startswith('bert_squad') and config.squad_var.startswith('bert_shared'):\n retrieval_params['norm'] = True\n\n retrieved_items = ir_tools.retrieve(**retrieval_params)\n summary = '\\n'.join([item[-1] for item in retrieved_items])\n # print(summary)\n with open(join(rr_tune_dp, cid), mode='a', encoding='utf-8') as out_f:\n out_f.write(summary)\n\n performance = rouge.compute_rouge_for_cont_sel_in_sentences_tdqfs(rr_tune_dp, ref_dp=tdqfs_summary_target_dp)\n with open(rr_tune_result_fp, mode='a', encoding='utf-8') as out_f:\n if FILTER in ('conf', 'comp'):\n rec = '{0:.2f}\\t{1}\\n'.format(filter_var, performance)\n else:\n rec = '{0}\\t{1}\\n'.format(filter_var, performance)\n\n out_f.write(rec)\n\n if exists(rr_tune_dp): # remove previous output\n shutil.rmtree(rr_tune_dp)\n\n\ndef rr_rank2records():\n rr_rec_dp = join(path_parser.summary_rank, rr_config.RR_RECORD_DIR_NAME_BERT)\n\n if exists(rr_rec_dp):\n raise ValueError('rr_rec_dp exists: {}'.format(rr_rec_dp))\n os.mkdir(rr_rec_dp)\n\n for cid in tqdm(cids):\n retrieval_params = {\n 'model_name': rr_config.RR_MODEL_NAME_BERT,\n 'cid': cid,\n 'filter_var': rr_config.FILTER_VAR,\n 'filter': rr_config.FILTER,\n 'deduplicate': None,\n 'min_ns': rr_config.RR_MIN_NS\n }\n \n # todo: examine if we want to normalize scores for bert_rr\n if meta_model_name.startswith('bert_squad') and config.squad_var.startswith('bert_shared'):\n retrieval_params['norm'] = True\n\n retrieved_items = ir_tools.retrieve(**retrieval_params)\n ir_tools.dump_retrieval(fp=join(rr_rec_dp, cid), retrieved_items=retrieved_items)\n\n\ndef select_e2e_tdqfs():\n # graph_tools.select_end2end(model_name=model_name, omega=omega)\n params = {\n 'model_name': rr_config.RR_MODEL_NAME_BERT,\n 'length_budget_tuple': ('nw', 250),\n 'cos_threshold': 0.6, # do not pos cosine similarity criterion?\n 'retrieved_dp': ir_rec_dp,\n 'cc_ids': cids,\n }\n select_sent.select_end2end_for_eli5(**params)\n\n\ndef compute_rouge_tdqfs():\n text_params = {\n 'model_name': rr_config.RR_MODEL_NAME_BERT,\n 'length_budget_tuple': ('nw', 250),\n 'cos_threshold': 0.6, # do not pos cosine similarity criterion?\n }\n text_dp = tools.get_text_dp_for_eli5(**text_params)\n\n rouge_parmas = {\n 'text_dp': text_dp,\n 'ref_dp': tdqfs_summary_target_dp,\n }\n rouge_parmas['length'] = 250\n \n output = rouge.compute_rouge_for_tdqfs(**rouge_parmas)\n return output\n\n\ndef get_text_dp():\n \"\"\"\n Copied from bert_marge/main.py.\n \n \"\"\"\n assert rr_config.USE_TEXT\n dn = rr_config.TEXT_DIR_NAME\n text_dp = path_parser.summary_text / dn\n print(f'Build from text_dp: {text_dp}')\n return text_dp\n\n\ndef build_unilm_input(src):\n \"\"\"\n Copied from bert_marge/main.py.\n \n \"\"\"\n if src == 'rank':\n rank_dp = join(path_parser.summary_rank, rr_config.RR_RANK_DIR_NAME_BERT)\n text_dp = None\n elif src == 'text':\n rank_dp = None\n text_dp = get_text_dp()\n \n unilm_in_params = {\n 'marge_config': rr_config,\n 'rank_dp': rank_dp,\n 'text_dp': text_dp,\n 'fix_input': True,\n 'cluster_ids': cids,\n 'prepend_len': rr_config.PREPEND_LEN,\n }\n \n # unilm_in_params['prepend_query'] = rr_config.QUERY_TYPE \n unilm_in_params['prepend_query'] = rr_config.PREPEND_QUERY \n unilm_in_params['test_cid_query_dicts'] = test_cid_query_dicts\n unilm_input = UniLMInput(**unilm_in_params)\n \n if src == 'rank':\n unilm_input.build_from_rank()\n elif src == 'text':\n unilm_input.build_from_text()\n\n\ndef eval_unilm_out(eval_only=False):\n 
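# Standalone sketch of how tune() above builds its sweep grid: fractional
# thresholds for the 'conf'/'comp' filters, integer top-K steps otherwise.
# The 200 end point mirrors the fallback in the code and is otherwise
# arbitrary.
import numpy as np

FILTER = 'topK'
if FILTER in ('conf', 'comp'):
    tune_range = np.arange(0.05, 1.05, 0.05)                 # ~0.05 .. 1.00
else:
    interval = 10
    tune_range = range(interval, 200 + interval, interval)   # 10 .. 200
print(list(tune_range)[:5])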
unilm_eval = UniLMEval(marge_config=rr_config, \n pre_tokenize_sent=False, \n max_eval_len=250, \n cluster_ids=cids,\n eval_tdqfs=use_tdqfs)\n\n if eval_only:\n unilm_eval.eval_unilm_out()\n return \n unilm_eval.build_and_eval_unilm_out()\n\n\nif __name__ == '__main__':\n init()\n dump_rel_scores()\n rel_scores2rank()\n\n rr_rank2records()\n select_e2e_tdqfs()\n compute_rouge_tdqfs()\n\n # build_unilm_input(src='rank')\n # eval_unilm_out(eval_only=False)\n","repo_name":"yumoxu/marge","sub_path":"src/frame/bert_rr/main_tdqfs.py","file_name":"main_tdqfs.py","file_ext":"py","file_size_in_byte":13771,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"81"} +{"seq_id":"44428763102","text":"\"\"\"The NEA Singapore Weather component.\"\"\"\nfrom __future__ import annotations\n\nfrom datetime import datetime, timedelta, timezone\nimport logging\n\nfrom aiohttp.client_reqrep import ClientResponse\nfrom async_timeout import timeout\nimport httpx\nfrom requests.exceptions import ConnectionError as ConnectError, HTTPError, Timeout\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import (\n CONF_SCAN_INTERVAL,\n CONF_SENSORS,\n CONF_TIMEOUT,\n CONF_REGION,\n)\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n\nfrom .nea import (\n Forecast2hr,\n Forecast24hr,\n Forecast4day,\n Temperature,\n Humidity,\n Wind,\n Rain,\n)\n\nfrom .const import (\n CONF_AREAS,\n CONF_RAIN,\n CONF_SENSOR,\n CONF_WEATHER,\n DEFAULT_SCAN_INTERVAL,\n DEFAULT_TIMEOUT,\n DOMAIN,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef get_platforms(config_entry: ConfigEntry) -> dict:\n \"\"\"Get list of platforms to be set up.\"\"\"\n _platforms = {\"platforms\": set(), \"entities\": list()}\n if config_entry.data[CONF_WEATHER]:\n _platforms[\"platforms\"].add(\"weather\")\n _platforms[\"entities\"].append(CONF_WEATHER)\n if config_entry.data.get(CONF_SENSOR, False):\n if config_entry.data[CONF_SENSORS].get(CONF_AREAS, [\"None\"]) != [\"None\"]:\n _platforms[\"platforms\"].add(\"sensor\")\n _platforms[\"entities\"].append(CONF_AREAS)\n if config_entry.data[CONF_SENSORS].get(CONF_REGION, False):\n _platforms[\"platforms\"].add(\"sensor\")\n _platforms[\"entities\"].append(CONF_REGION)\n if config_entry.data[CONF_SENSORS].get(CONF_RAIN, False):\n _platforms[\"platforms\"].add(\"camera\")\n _platforms[\"entities\"].append(CONF_RAIN)\n\n _platforms[\"platforms\"] = list(_platforms[\"platforms\"])\n\n return _platforms\n\n\nasync def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n \"\"\"Set up nea_sg_weather as config entry.\"\"\"\n coordinator = NeaWeatherDataUpdateCoordinator(hass, config_entry)\n await coordinator.async_config_entry_first_refresh()\n\n hass.data.setdefault(DOMAIN, {})[config_entry.entry_id] = coordinator\n\n _platforms = get_platforms(config_entry)[\"platforms\"]\n hass.async_add_job(\n hass.config_entries.async_forward_entry_setups(config_entry, _platforms)\n )\n\n return True\n\n\nasync def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n \"\"\"Unload a config entry.\"\"\"\n _platforms = get_platforms(config_entry)[\"platforms\"]\n unload_ok = await hass.config_entries.async_unload_platforms(\n config_entry, _platforms\n )\n\n if unload_ok:\n hass.data[DOMAIN].pop(config_entry.entry_id)\n\n return unload_ok\n\n\nclass NeaWeatherDataUpdateCoordinator(DataUpdateCoordinator):\n \"\"\"Class to manage 
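# A self-contained illustration of the dedup pattern in get_platforms() above:
# several entity options can map to the same platform, so platforms go into a
# set while entity names go into a list. The dict keys here are stand-ins for
# the CONF_* constants, and the config values are invented.
config = {'weather': True, 'sensors': {'areas': ['Bedok'], 'region': True}}
platforms, entities = set(), []
if config['weather']:
    platforms.add('weather')
    entities.append('weather')
if config['sensors'].get('areas', ['None']) != ['None']:
    platforms.add('sensor')
    entities.append('areas')
if config['sensors'].get('region', False):
    platforms.add('sensor')
    entities.append('region')
print(sorted(platforms), entities)  # 'sensor' appears once, entities thrice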
fetching Nea Weather data.\"\"\"\n\n def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:\n \"\"\"Initialize global Nea Weather data updater.\"\"\"\n self.timeout = config_entry.data.get(CONF_TIMEOUT, DEFAULT_TIMEOUT)\n self.weather = NeaWeatherData(hass, config_entry)\n self.update_interval = timedelta(\n minutes=config_entry.data.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)\n )\n self._hass = hass\n self._config_entry = config_entry\n self.data: NeaWeatherData.NeaData\n\n super().__init__(\n hass,\n _LOGGER,\n name=DOMAIN,\n update_interval=self.update_interval,\n )\n\n async def _async_update_data(self) -> NeaWeatherData.NeaData:\n \"\"\"Fetch data from NEA.\"\"\"\n try:\n async with timeout(self.timeout):\n return await self.weather.async_update()\n except Exception as err:\n raise UpdateFailed(f\"Update failed: {err}\") from err\n\n\nclass NeaWeatherData:\n \"\"\"Get the latest data from NEA API.\"\"\"\n\n def __init__(self, hass, config_entry):\n \"\"\"Initialize the data object.\"\"\"\n self._hass = hass\n self._config_entry = config_entry\n self.data: self.NeaData\n\n async def async_update(self) -> NeaData:\n \"\"\"Get the latest data from NEA API for entities registered.\"\"\"\n # Consolidate data requests to avoid redundant requests\n self.data = self.NeaData()\n _data_objects = list()\n _response = dict()\n if self._config_entry.data[CONF_WEATHER]:\n _data_objects += [\n self.data.forecast2hr,\n self.data.forecast24hr,\n self.data.forecast4day,\n self.data.temperature,\n self.data.humidity,\n self.data.wind,\n self.data.rain,\n ]\n else:\n if self._config_entry.data[CONF_SENSORS].get(CONF_AREAS, [\"None\"]) != [\n \"None\"\n ]:\n _data_objects += [self.data.forecast2hr]\n if self._config_entry.data[CONF_SENSORS].get(CONF_REGION, False):\n _data_objects += [self.data.forecast24hr]\n _data_objects = set(_data_objects)\n\n for data_object in _data_objects:\n await data_object.async_init()\n _response[data_object.__class__.__name__] = data_object.response\n\n # _LOGGER.debug(\"Data is: %s\", _response)\n _LOGGER.debug(\"Coordinator was updated at %s\", self.data.query_time)\n return self.data\n\n class NeaData:\n \"\"\"Container for Weather data\"\"\"\n\n def __init__(self) -> None:\n self.forecast2hr = Forecast2hr()\n self.forecast24hr = Forecast24hr()\n self.forecast4day = Forecast4day()\n self.temperature = Temperature()\n self.humidity = Humidity()\n self.wind = Wind()\n self.rain = Rain()\n self.query_time = datetime.now(timezone(timedelta(hours=8))).isoformat()\n","repo_name":"liangleslie/nea_sg_weather","sub_path":"custom_components/nea_sg_weather/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"19876016749","text":"# Constants:\n# amax : maximum possible acceleration\n# vmax : maximum possible velocity\n# vmin : slowest we would ever consider going\n\ndef curvature(path,i):\n return abs(deriv2(path,i) / max(0.001, pow(deriv1(path,i),2)))\n\ndef centripetal(curvature,v):\n return abs(curvature * pow(v,2))\n\ndef safe_speed(curvature, v_neighbor, ds):\n cent = centripetal(curvature,v_neighbor)\n if cent >= amax:\n return max(vmin, min(vmax, sqrt(abs(amax / curvature))))\n remaining_acceleration = sqrt(pow(amax,2) - pow(cent,2))\n dt = ds / max(vmin, v_neighbor)\n return max(vmin, min(vmax, v_neighbor + dt * remaining_acceleration))\n\ndef path_velocities(path, v_initial, v_final, ds):\n # path is the spline\n # 
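# Hedged sketch of the query_time stamp built in NeaData above: Singapore
# observes no daylight saving, so a fixed UTC+8 offset from
# timezone(timedelta(hours=8)) is sufficient here.
from datetime import datetime, timedelta, timezone

sgt = timezone(timedelta(hours=8))
print(datetime.now(sgt).isoformat())   # e.g. 2024-01-01T12:00:00.123456+08:00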
v_initial is speed at beginning of path (current drone speed)\n # v_final is desired speed at end of the path\n # ds is the arc distance between path[i] and path[i+1]\n velocities = ... # initialize as list of numbers with same length (f) as path\n velocities[0] = v_initial\n # Forward iteration ensures each velocity is attainable based on previous speed\n for i in range(f-1):\n velocities[i+1] = safe_speed(curvature(path,i), velocities[i], ds)\n velocities[f] = min(v_final, velocities[f])\n # Backward iteration ensure each velocity allows for upcoming turns\n for i in range(f-1):\n j = f - 2 - i\n velocities[j] = safe_speed(curvature(path,j), velocities[j+1], ds)\n return velocities\n\n","repo_name":"Veilkrand/drone_race","sub_path":"spline_planner/scripts/plan_velocities.py","file_name":"plan_velocities.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"23425762272","text":"from __future__ import annotations\n\nimport typing as t\n\nimport pytest\nimport sqlalchemy\nimport sqlalchemy.orm\nfrom sqlalchemy.orm import Mapped\n\nfrom quart_sqlalchemy import SQLAlchemy\nfrom quart_sqlalchemy.model import Base\nfrom quart_sqlalchemy.model import SoftDeleteMixin\n\nfrom ...base import SimpleTestBase\n\n\nsa = sqlalchemy\n\n\nclass TestSoftDeleteFeature(SimpleTestBase):\n @pytest.fixture\n def Post(self, db: SQLAlchemy, User: t.Type[t.Any]) -> t.Generator[t.Type[Base], None, None]:\n class Post(SoftDeleteMixin, db.Model):\n id: Mapped[int] = sa.orm.mapped_column(primary_key=True)\n title: Mapped[str] = sa.orm.mapped_column()\n user_id: Mapped[t.Optional[int]] = sa.orm.mapped_column(sa.ForeignKey(\"user.id\"))\n\n user: Mapped[t.Optional[User]] = sa.orm.relationship(backref=\"posts\")\n\n db.create_all()\n yield Post\n\n def test_inactive_filtered(self, db: SQLAlchemy, Post: t.Type[t.Any]):\n with db.bind.Session() as s:\n with s.begin():\n post = Post(title=\"hello\")\n s.add(post)\n s.flush()\n s.refresh(post)\n\n with db.bind.Session() as s:\n with s.begin():\n post.is_active = False\n s.add(post)\n\n with db.bind.Session() as s:\n posts = s.scalars(sa.select(Post)).all()\n assert len(posts) == 0\n\n posts = s.scalars(sa.select(Post).execution_options(include_inactive=True)).all()\n assert len(posts) == 1\n select_post = posts.pop()\n\n assert select_post.id == post.id\n assert select_post.is_active is False\n","repo_name":"joeblackwaslike/quart-sqlalchemy","sub_path":"tests/integration/model/mixins_test.py","file_name":"mixins_test.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"21738169245","text":"# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/\nclass Solution:\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n \n if len(prices) == 0:\n return 0\n \n min_val = prices[0]\n max_profit = 0 \n \n for price in prices:\n if min_val> price:\n min_val = price\n if price - min_val > max_profit:\n max_profit = price - min_val\n return max_profit ","repo_name":"Rishabhravindra/interview-prep","sub_path":"Sequences/best-time-to-sell-stock.py","file_name":"best-time-to-sell-stock.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28949650365","text":"import csv\n\n\nclass Question:\n def __init__(self, question_text: str, 
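# The path_velocities() pseudocode above leaves the initialization elided
# ('velocities = ...') and indexes velocities[f] past the end of an f-element
# list. A hedged, runnable correction; the curvature profile and safe_speed
# are passed in as stand-ins so the sketch is self-contained.
def path_velocities_fixed(curvatures, v_initial, v_final, ds, safe_speed):
    f = len(curvatures)
    velocities = [v_initial] * f
    for i in range(f - 1):                       # forward: reachable speeds
        velocities[i + 1] = safe_speed(curvatures[i], velocities[i], ds)
    velocities[-1] = min(v_final, velocities[-1])
    for j in range(f - 2, -1, -1):               # backward: leave braking room
        velocities[j] = min(velocities[j],
                            safe_speed(curvatures[j], velocities[j + 1], ds))
    return velocities

# Toy safe_speed: slower where curvature is higher (illustrative only).
print(path_velocities_fixed([0.0, 0.5, 2.0, 0.1], 4.0, 1.0, 0.5,
                            lambda k, v, ds: max(0.5, min(5.0, v + ds - k))))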
possible_answers: list):\n        self.question_text = question_text\n        self.possible_answers = possible_answers\n\n\nclass Answer:\n    def __init__(self, answer_text: str, correct: bool = False):\n        self.answer_text = answer_text\n        self.correct = correct\n\n\nclass Quiz:\n    def __init__(self, title: str, questions: list = None):\n        self.title = title\n        # avoid a shared mutable default list across Quiz instances\n        self.questions = questions if questions is not None else []\n\n    def export_to_csv(self, file_path: str):\n        with open(file_path, \"w\", newline=\"\") as csvfile:\n            fieldnames = [\n                \"Question\",\n                \"Answer 1\",\n                \"Answer 2\",\n                \"Answer 3\",\n                \"Answer 4\",\n                \"Answer 5\",\n                \"Answer 6\",\n                \"Correct Answer\",\n            ]\n            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n            writer.writeheader()\n            for question in self.questions:\n                writer.writerow(self.get_question_as_dict(question))\n\n    def get_question_as_dict(self, question: Question):\n        q_row = {}\n        q_row[\"Question\"] = question.question_text\n        correct_answer = []\n        for i, answer in enumerate(question.possible_answers):\n            # i is zero-based; the headers in fieldnames are one-based\n            answer_header = f\"Answer {i + 1}\"\n            q_row[answer_header] = answer.answer_text\n            if answer.correct:\n                correct_answer.append(answer_header)\n        q_row[\"Correct Answer\"] = \", \".join(correct_answer)\n        return q_row\n","repo_name":"babanesma/udemy-quiz-scrape","sub_path":"quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19853173187","text":"\"\"\"\nUtility functions for plotting learning and accuracy curves.\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\n\ndef plot_learning_curve(\n    train_losses_custom_optimizer,\n    train_losses_optimizer,\n    valid_losses,\n    hyperparams,\n    base_path=None,\n):\n    \"\"\"\n    Plot the learning curve showing training and validation losses across epochs.\n    \"\"\"\n    plt.figure(figsize=(10, 6), dpi=110)\n    plt.plot(train_losses_custom_optimizer, label=\"Training Loss (Custom optimizer)\")\n    plt.plot(train_losses_optimizer, label=\"Training Loss (Optimizer)\")\n    plt.plot(valid_losses, label=\"Validation Loss\")\n    plt.title(\"Learning Curve\")\n    plt.xlabel(\"Epochs\")\n    plt.ylabel(\"Loss\")\n    plt.legend()\n    plt.grid()\n    hyperparams_str = f\"Epochs: {hyperparams[0]}, LR: {hyperparams[1]}\"\n    plt.text(\n        0.5,\n        0.95,\n        hyperparams_str,\n        horizontalalignment=\"center\",\n        verticalalignment=\"center\",\n        transform=plt.gca().transAxes,\n    )\n    plt.savefig(f\"{base_path}/lc_{hyperparams[0]}_{hyperparams[1]}.png\")\n    plt.close()\n\n\ndef plot_accuracy_curve(\n    train_accuracies_custom_optimizer,\n    train_accuracies_optimizer,\n    valid_accuracies,\n    hyperparams,\n    base_path=None,\n):\n    \"\"\"\n    Plot the accuracy curve showing training and validation accuracies across epochs.\n    \"\"\"\n    plt.figure(figsize=(10, 6), dpi=110)\n    plt.plot(\n        train_accuracies_custom_optimizer, label=\"Training Accuracy (Custom Optimizer)\"\n    )\n    plt.plot(train_accuracies_optimizer, label=\"Training Accuracy (Optimizer)\")\n    plt.plot(valid_accuracies, label=\"Validation Accuracy\")\n    plt.title(\"Accuracy Curve\")\n    plt.xlabel(\"Epochs\")\n    plt.ylabel(\"Accuracy\")\n    plt.legend()\n    plt.grid()\n    hyperparams_str = f\"Epochs: {hyperparams[0]}, LR: {hyperparams[1]}\"\n    plt.text(\n        0.5,\n        0.95,\n        hyperparams_str,\n        horizontalalignment=\"center\",\n        verticalalignment=\"center\",\n        transform=plt.gca().transAxes,\n    )\n    plt.savefig(f\"{base_path}/acc_{hyperparams[0]}_{hyperparams[1]}.png\")\n    
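# A tiny standalone run of the csv.DictWriter pattern that Quiz.export_to_csv
# above relies on, writing to an in-memory buffer instead of a file; the row
# contents are invented.
import csv
import io

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=['Question', 'Answer 1', 'Correct Answer'])
writer.writeheader()
writer.writerow({'Question': 'Capital of France?', 'Answer 1': 'Paris',
                 'Correct Answer': 'Answer 1'})
print(buf.getvalue())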
plt.close()\n","repo_name":"jacob5412/2023FA_MSAI_349","sub_path":"neural-networks/utils/plot_evaluation_custom_optimizer.py","file_name":"plot_evaluation_custom_optimizer.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10684964717","text":"from page_loader.support_func import formatter, same_domain\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nfrom cairosvg import svg2png\nimport io\n\ncurrent_dir = os.getcwd()\n\n# accepts only the html location and the file name without extension\n\n\ndef img_downloader(html_way, file_name, url):\n    # check whether the folder for files exists; create it if not\n    if not os.path.isdir(f'{html_way}/{file_name}_files'):\n        os.mkdir(f'{html_way}/{file_name}_files')\n    save_folder = f'{html_way}/{file_name}_files'\n    # open the previously downloaded file for the soup\n    html_file = open(f'{html_way}/{file_name}.html')\n    soup = BeautifulSoup(html_file, 'html.parser')\n    # collect all links with the img src tag\n    for link in soup.find_all('img'):\n        # check which host the file is located on\n        img_url = link.get('src')\n        if same_domain(img_url, file_name) is True:\n            # send the request\n            p = requests.get(img_url)\n            img_name = formatter(img_url)\n            # convert everything there is to png\n            # (svg is not converted) via Image\n            if img_url[-3:] == 'svg':\n                img_name = img_name[:50]\n                svg2png(\n                    url=f'{img_url}', write_to=f'{save_folder}/{img_name}.png'\n                )\n            else:\n                img_name = img_name[:50]\n                im = Image.open(io.BytesIO(p.content))\n                im.save(f'{save_folder}/{img_name}.png')\n                # replace the links in the file with local ones\n                link['src'] = f'{save_folder}/{img_name}.png'\n    # write the soup back to the file\n    with open(f'{html_way}/{file_name}.html', 'w') as f:\n        f.write(soup.prettify())\n","repo_name":"SizNi/python-project-51","sub_path":"page_loader/img_download.py","file_name":"img_download.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1375740722","text":"import open3d as o3d\nimport os\nimport json\nfrom scipy.spatial.transform import Rotation\nimport numpy as np\n\ndef getPath(data):\n    return os.path.join(r'Y:\hddisk5\users\yifei\omniobject3d\OpenXD-OmniObject3D-New\raw\decimated',data,'Scan')\n\ndata_path = 'anise/anise_002'\n\nmesh = o3d.io.read_triangle_mesh(os.path.join(getPath(data_path),'Scan.obj'))\nwith open(os.path.join(getPath(data_path),'Scan.json'),'r') as f:\n    data = json.load(f)\n\npoints_index=[0,1,7,2,3,6,4,5,3]\n\ndef drawbox(corner_box):\n    lines = [[0, 1],[1,7],[7,2],[2,0],\n             [3,6],[6,4],[4,5],[5,3],\n             [0,3],[1,6],[7,4],[2,5]]\n\n    def diff(a, b, len=1):\n        if abs(a - b) 
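# Standalone sketch of the bytes-to-PNG step in img_downloader above: wrap
# the raw HTTP body in BytesIO, let Pillow decode it, and re-save as PNG.
# The 'downloaded' bytes are generated in memory here instead of fetched.
import io
from PIL import Image

buf = io.BytesIO()
Image.new('RGB', (2, 2), 'red').save(buf, format='JPEG')  # stand-in download
im = Image.open(io.BytesIO(buf.getvalue()))
im.save('demo.png')
print(im.size)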
It contains the following columns: \n\n\tname - title of the page\n\tuser - the user name or ip that made a revision\n\tn_edits - the total number of edits that user made\n\tn_minor_edits - the number of minor edits that user made\n\tfirst_edit - the time of the first edit\n\tlast_edit - the time of the last edit \n\tadded_bytes - the total number of bytes added\n\n\"\"\"\n\nUSER_AGENT = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\nGET_DATA_SUCCESS = 1\nGET_DATA_FAILURE = 0 \n\n\ndef print_header(outputfile):\n\t\"\"\"\n\t\tPrints the header (column names) to a given output file\n\t\"\"\"\n\tprint(\"name\\tuser\\tn_edits\\tn_minor_edits\\tfirst_edit\\tlast_edit\\tadded_bytes\", file=outputfile)\n\ndef get_data(raw_data, name, file = sys.stdout): \n\t\"\"\"\n\t\tProcesses the raw data from the website and outputs \n\t\tthe data of interest\n\t\"\"\"\n\n\traw_data = raw_data.splitlines() # split it in lines\n\t# print(raw_data)\n\n\t# go to the table of relevance\n\ti = 0 \n\tline = raw_data[i]\n\twhile not 'Added (Bytes)' in line: \n\t\ti += 1\n\t\ttry: \n\t\t\tline = raw_data[i]\n\t\texcept: \n\t\t\treturn GET_DATA_FAILURE\n\n\ti += 3 # get to the starting point of the list\n\n\tline = raw_data[i]\n\n\twhile len(line) != 0: # not the end of the table\n\n\t\tprint(name, end='\\t', file=file)\n\t\tprint(line.strip(), end='\\t', file=file) # user name\n\t\ti += 5\n\t\tline = raw_data[i]\n\t\tprint(line.strip(), end='\\t', file=file) # number of edits\n\t\ti += 1\n\t\tline = raw_data[i]\n\t\tprint(line.strip(), end='\\t', file=file) # number of minor edits\n\t\ti += 2\n\t\tline = raw_data[i]\n\t\tprint(line.strip(), end='\\t', file=file) # first edit\n\t\ti += 1\n\t\tline = raw_data[i]\n\t\tprint(line.strip(), end='\\t', file=file) # last edit\n\t\ti += 2\n\t\tline = raw_data[i]\n\t\tprint(line.strip().replace(',', ''), file=file) # added_bytes\n\t\ti += 3\n\t\tline = raw_data[i]\n\n\treturn GET_DATA_SUCCESS\n\ndef sleep(options): \n\tif options.sleep != None: \n\t\ttime.sleep(options.sleep)\n\t\tif options.verbose: \n\t\t\tprint('Sleeping for %d seconds...'%options.sleep)\n\ndef get_link(article_title, top_users = 10000):\n\t\"\"\"\n\t\tReturns the link to be scraped given the title of an article \n\t\n\t\tArgs:\n\t\t\tarticle_title - title of the article\n\t\t\ttop_users - number of top contributors to be downloaded (Default: 10000)\n\n\t\"\"\"\n\treturn \"https://tools.wmflabs.org/xtools-articleinfo/?article=%s&project=en.wikipedia.org&editorlimit=%s#topeditors\"%(\n\t\t\t\tarticle_title, \n\t\t\t\tstr(top_users)\n\t\t\t)\n\ndef main():\n\n\tparser = OptionParser(usage=usage)\t\n\tparser.add_option(\"--attempts\", \"-a\", action=\"store\", dest=\"max_attempts\", default=100, type=int, \n\t\t\t\t \t\t\thelp=\"Maximum number of attempts to scrape the given pages (Default: 100)\")\n\tparser.add_option(\"--sleep\", \"-s\", action=\"store\", dest=\"sleep\", default=None, type=float, \n\t\t\t\t \t\t\thelp=\"Sleep time scraping two pages. (Default: no sleep)\")\n\tparser.add_option(\"--top\", \"-t\", action=\"store\", dest=\"top\", default=10000, type=int, \n\t\t\t\t \t\t\thelp=\"Number of top users to be scraped. 
(Default: 10000)\")\n\tparser.add_option(\"-v\", action=\"store_true\", dest=\"verbose\", default=False, \n\t\t\t\t \t\t\thelp=\"verbose.\")\n\t(options, args) = parser.parse_args()\n\t\n\t# process arguments\n\tif (len(args)!=2):\n\t\tparser.print_help()\n\t\treturn 1\n\n\t# process the list of articles \n\tarticle_titles, links = [], [] # initialize\n\n\tif os.path.isfile(args[0]): # in case a file is passed as first argument\n\t\twith open(args[0], 'r') as inputfile:\n\t\t\tfor article_title in inputfile: \n\t\t\t\tarticle_title.strip() \n\t\t\t\tarticle_title.replace(' ', '%20') # replace the spaces with %20 \n\t\t\t\tarticle_titles.append(article_title)\n\t\t\t\tlinks.append(get_link(article_title, top_users = options.top))\n\telse: # just one title given\t\n\t\tarticle_titles.append( args[0] )\n\t\tlinks.append(get_link(args[0], top_users = options.top))\n\n\t# get the outputfilename \n\toutputfilename = args[1] \n\toutputfile = open(outputfilename, 'w')\n\tprint_header(outputfile) # print the column names to the output file\n\n\tif options.verbose: # prints the list of pages to be scraped\n\t\tprint(\"\\nList of pages to be scraped:\\n\")\n\t\tprint(\"no.\\t\\ttitle\\t\\tlink\")\n\t\tprint(\"---\\t\\t-----\\t\\t----\")\n\t\tfor i, (article_title, link) in enumerate(zip(article_titles, links)): \n\t\t\tprint(\"%d\\t\\t%s\\t\\t%s\"%(i+1, article_title, link))\n\t\tprint(\"---\\t\\t-----\\t\\t----\\n\\n\")\n\n\tsuccessfully_scraped = 0 # number of successfully scraped pages\n\tn_links = len(links) # total number of links to be scraped\n\n\tattempt = 0 # number of attempts to scrape the articles in the list \n\tfailed_articles = [] # a list of the articles that failed to be scraped so far\n\n\t# try to scrape all the pages until you 1) scraped all, or 2) extended the number of attempts\n\twhile (successfully == n_links) or (attempt < options.max_attempts): \n\n\t\tfor article_title, link in zip(article_titles, links): \n\t\t\tif options.verbose: \n\t\t\t\tprint(\"Starting with scraping\") \n\n\t# go through all the wikipedia links\n\tfor name, link in zip(article_titles, links): \n\n\t\tif options.verbose: \n\t\t\ti += 1\n\t\t\tprint('%d of %d links processed (%.2f %%)\\t%d of %d links failed (%.2f %%)'%(i, n_links, float(i) / float(n_links) * 100, len(list_names_failed), i, float(len(list_names_failed)) / float(i) * 100))\n\t\t\tprint(\"Continuing with scraping %s (link: %s)\"%(name, link))\n\n\t\t# get the raw data\n\t\theaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\t\tdata = readInDataFromURL(link, headers=USER_AGENT)\n\n\t\tflag = get_data(data, name, file=outputfile)\n\t\tif flag == GET_DATA_FAILURE: \n\t\t\tlist_names_failed.append(name)\n\n\t\tsleep(options)\n\t\n\tif options.verbose: \n\t\tprint(\"DONE\")\n\n\t# print list of failed names: \n\n\tprint('\\npages that failed:\\n\\n')\n\tfor page in list_names_failed: \n\t\tprint(page)\n\n\toutputfile.close()\t\n\t\nif __name__ == '__main__':\n\tsys.exit(main())","repo_name":"louisdijkstra/wikiscraper","sub_path":"bin/scrape-users2.py","file_name":"scrape-users2.py","file_ext":"py","file_size_in_byte":6490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25320450232","text":"import random\nimport math\nimport bisect\nimport sys\nimport types\n\n################################################################################\n# Dynamic Fixed Width <-> Float Value 
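# The retry loop in main() above tests an undefined name ('successfully') and
# inverts the exit condition; a hedged, self-contained sketch of the intended
# control flow, with scrape_one standing in for the real fetch-and-parse step:
def scrape_all(items, max_attempts, scrape_one):
    done, failed = set(), []
    attempt = 0
    while len(done) < len(items) and attempt < max_attempts:
        attempt += 1
        failed = []
        for item in items:
            if item in done:
                continue
            if scrape_one(item):
                done.add(item)
            else:
                failed.append(item)
    return done, failed

print(scrape_all(['A', 'B'], 3, lambda t: t == 'A'))  # ({'A'}, ['B'])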
Conversion Functions\n################################################################################\n\nimport numpy\n\n# Dynamic float cast depending on the resolution specified in f_params\ndef float_cast(f_params, *args):\n # Use Python float for resolutions <= 32, numpy float64 for resolutions > 32\n if f_params['resolution'] <= 32: FloatCast = float\n else: FloatCast = numpy.float64\n\n if len(args) == 1: return FloatCast(args[0])\n return [FloatCast(x) for x in args]\n\n# Fixed Width Value to Float on f_params\ndef fixed2float_dynamic(x, f_params):\n amax, amin, xf, r = float_cast(f_params, f_params['argmax'], f_params['argmin'], x, 2**f_params['resolution'] - 1)\n return (amax - amin)*(xf/r) + amin\n\n# Float to Fixed Width Value on f_params\ndef float2fixed_dynamic(x, f_params):\n amax, amin, xf, r = float_cast(f_params, f_params['argmax'], f_params['argmin'], x, 2**f_params['resolution'] - 1)\n return int( ((xf - amin) / (amax - amin)) * r )\n\n# Float Epsilon to Fixed Width Epsilon on f_params\ndef epsilon_float2fixed_dynamic(x, f_params):\n amax, amin, xf, r = float_cast(f_params, f_params['argmax'], f_params['argmin'], x, 2**f_params['resolution'])\n return int( (x / (amax-amin)) * r )\n\n################################################################################\n# Faster Fixed Width <-> Float Value Conversion Functions\n################################################################################\n\n# Fixed Width Value to Float on f_params\ndef fixed2float_pyfloat(x, f_params):\n amax, amin, xf, r = float(f_params['argmax']), float(f_params['argmin']), float(x), float(2**f_params['resolution'] - 1)\n return (amax - amin)*(xf/r) + amin\n\n# Float to Fixed Width Value on f_params\ndef float2fixed_pyfloat(x, f_params):\n amax, amin, xf, r = float(f_params['argmax']), float(f_params['argmin']), float(x), float(2**f_params['resolution'] - 1)\n return int( ((xf - amin) / (amax - amin)) * r )\n\n# Float Epsilon to Fixed Width Epsilon on f_params\ndef epsilon_float2fixed_pyfloat(x, f_params):\n amax, amin, xf, r = float(f_params['argmax']), float(f_params['argmin']), float(x), float(2**f_params['resolution'])\n return int( (x / (amax-amin)) * r )\n\n################################################################################\n# Choose our conversion Functions (Dynamic vs All Pyfloat)\n################################################################################\n\n#fixed2float = fixed2float_pyfloat\n#float2fixed = float2fixed_pyfloat\n#epsilon_float2fixed = epsilon_float2fixed_pyfloat\n\nfixed2float = fixed2float_dynamic\nfloat2fixed = float2fixed_dynamic\nepsilon_float2fixed = epsilon_float2fixed_dynamic\n\n################################################################################\n# Point Float Utility Functions\n################################################################################\n\n# Convert Fixed Width multi-dimensional point to Float multi-dimensional Point\n# on f_params\n#\n# Returns (float_point) Float multi-dimensional point\ndef point_fixed2float(point, f_params):\n fl_point = []\n for p in point:\n fl_point.append(fixed2float(p, f_params))\n return fl_point\n\n# Evaluate Euclidean distance between two points\n#\n# Returns (float) distance\ndef point_distance(p1, p2, f_params):\n p1 = point_fixed2float(p1, f_params)\n p2 = point_fixed2float(p2, f_params)\n\n distance = type(p1[0])(0.0)\n for i in range(f_params['dimensions']):\n distance += (p1[i] - p2[i])**2\n distance = type(p1[0])(numpy.sqrt(distance))\n\n return distance\n\n# Evaluate 
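# Round-trip check of the fixed<->float mapping defined above, with the two
# conversions inlined so the snippet runs standalone; quantization error is
# bounded by one step of the (2**resolution - 1) grid. Range and resolution
# are invented.
amin, amax, res = -2.0, 2.0, 16

def to_fixed(x):
    return int((x - amin) / (amax - amin) * (2**res - 1))

def to_float(n):
    return (amax - amin) * (n / (2**res - 1)) + amin

x = 0.7
print(to_fixed(x), abs(to_float(to_fixed(x)) - x) <= (amax - amin) / (2**res - 1))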
fitness of a point on f_params function\n#\n# Returns (float) fitness\ndef point_fitness(point, f_params):\n # Calculate fitness as -abs(f(x))\n func = f_params['function']\n if isinstance(f_params['function'], types.StringType) or \\\n isinstance(f_params['function'], types.UnicodeType):\n func = eval(func)\n fitness = -1.0*abs(func(point_fixed2float(point, f_params)))\n\n # Set fitness large if close to known roots\n for r in f_params['solutions']:\n if point_distance(point, r, f_params) < f_params['cluster_epsilon']:\n fitness = -1.0*(2**f_params['resolution'])\n break\n\n return fitness\n\n################################################################################\n# Point Fixed Width Utility Functions\n################################################################################\n\n# Generate a new point\n#\n# Returns (point) Fixed Width multi-dimensional point\ndef point_generate(f_params):\n point = []\n for j in range(f_params['dimensions']):\n point.append( random.randint(0, 2**f_params['resolution']) )\n\n return point\n\n# Crossover two points\n#\n# Returns (list) Fixed Width multi-dimesional points\ndef point_crossover(p1, p2, f_params, prob_crossover):\n # If crossover occurs and the points are within crossover distance\n if random.random() < prob_crossover and \\\n point_distance(p1, p2, f_params) < f_params['crossover_epsilon']:\n # Average both points into one\n p3 = []\n for j in range(len(p1)):\n p3.append( (p1[j] + p2[j]) / 2 )\n return [p3]\n\n # Return original parents\n return [p1, p2]\n\n# Mutate a point\n#\n# Returns (point) Fixed Width multi-dimensional point\ndef point_mutate(p, f_params, prob_mutation):\n # For each dimension of the point\n for i in range(f_params['dimensions']):\n # For each of the bits\n for j in range(f_params['resolution']):\n # If mutation occurs, flip a bit\n if (random.random() < prob_mutation):\n p[i] = p[i] ^ 2**j\n\n return p\n\n################################################################################\n# Core GA Evolution Function\n################################################################################\n\n# Evolve a population to a new generation, including\n# initialization, selection, crossover, mutation.\n#\n# Arguments:\n# population (list) (point, fitness) tuples\n# params (dict) function and ga parameters\n#\n# Returns (list) (point, fitness) tuples\ndef evolve(population, params):\n f_params = params['f_params']\n ga_params = params['ga_params']\n\n # If population is empty, initialize population with new random members\n if len(population) == 0:\n for i in range(ga_params['new_popsize']):\n p = point_generate(f_params)\n f = point_fitness(p, f_params)\n population.append( (p, f) )\n\n # Fill population to minimum size with new random members\n for i in range(ga_params['min_popsize'] - len(population)):\n p = point_generate(f_params)\n f = point_fitness(p, f_params)\n population.append( (p, f) )\n\n # Sort population by fitness\n population = sorted(population, key=lambda x: x[1])\n\n # Compute the target evolved population size\n target_popsize = int(ga_params['keep_percent_popsize']*len(population))\n\n population_evolved = []\n\n while len(population_evolved) < target_popsize and len(population) > 2:\n # Choose fit member p1 by elitist selection\n p1 = population.pop()[0]\n # Choose fit member p2 by elitist selection\n p2 = population.pop()[0]\n\n # Crossover p1 and p2\n p_new = point_crossover(p1, p2, f_params, ga_params['prob_crossover'])\n # Mutate children\n p_new = [point_mutate(p, f_params, 
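# The XOR bit-flip used by point_mutate above, shown on a single coordinate:
# flipping bit j toggles 2**j in the fixed-width encoding. The mutation
# probability and seed are invented for the demo.
import random

random.seed(0)
value, resolution = 0b1010, 4
for j in range(resolution):
    if random.random() < 0.5:      # stands in for prob_mutation
        value ^= 1 << j
print(bin(value))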
ga_params['prob_mutation']) for p in p_new]\n\n # Calculate their fitness and add them to our evolved population\n for p in p_new:\n population_evolved.append( (p, point_fitness(p, f_params)) )\n\n # Sort evolved population by fitness\n population_evolved = sorted(population_evolved, key=lambda x: x[1])\n\n return population_evolved\n\n","repo_name":"alect/TaskSprint","sub_path":"examples/genetic/ga_multiplerootfinder_lib.py","file_name":"ga_multiplerootfinder_lib.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"43567250166","text":"import argparse\nfrom typing import Callable, Any\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom streaming.base import StreamingDataset\n\nclass CIFAR10Dataset(StreamingDataset):\n def __init__(self,\n remote: str,\n local: str,\n shuffle: bool,\n batch_size: int,\n transforms: Callable\n ) -> None:\n super().__init__(local=local, remote=remote, shuffle=shuffle, batch_size=batch_size)\n self.transforms = transforms\n\n def __getitem__(self, idx:int) -> Any:\n obj = super().__getitem__(idx)\n x = obj['x']\n y = obj['y']\n return self.transforms(x), y\n\ndef get_dataloader(remote_train, local_train, shuffle_train, batch_size):\n transformation = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(\n (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)\n )\n ])\n\n train_dataset = CIFAR10Dataset(remote_train, local_train, shuffle_train, batch_size=batch_size, transforms=transformation)\n # test_dataset = CIFAR10Dataset(remote_test, local_test, shuffle_test, batch_size=batch_size, transforms=transformation)\n\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8,\n pin_memory=True,\n persistent_workers=True)\n # test_dataloader = DataLoader(test_dataset, batch_size=batch_size)\n return train_dataloader\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--local_dir\", type=str)\n parser.add_argument(\"--remote_dir\", type=str)\n parser.add_argument(\"--batch_size\", type=int, default=32)\n parser.add_argument(\"--seed\", type=int, default=987)\n parser.add_argument(\"--num_workers\", type=int, default=8)\n parser.add_argument(\"--shuffle\", type=bool, default=False)\n parser.add_argument(\"--drop_last\", type=bool, default=False)\n args = parser.parse_args()\n\n local_dir = args.local_dir\n remote_dir = args.remote_dir\n train_dataloader = get_dataloader(remote_dir, local_dir, args.shuffle, args.batch_size)\n\n for idx, batch in enumerate(train_dataloader):\n print(idx)\n\n","repo_name":"karan6181/trial","sub_path":"cifar10_iterate.py","file_name":"cifar10_iterate.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37311146835","text":"import datetime\nimport logging\nimport os\nfrom random import randint, choices\n\nfrom flask import Flask, render_template, request, jsonify, make_response, session, url_for\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nfrom flask_restful import abort\nfrom waitress import serve\nfrom werkzeug.utils import redirect\nfrom threading import Timer\n\nfrom data import db_session\nfrom data.app_school_user_point import UserPoint\nfrom data.answer_quest import Answer\nfrom data.diary_post import DiaryPost\nfrom data.like import Like\nfrom data.popularity import 
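# Illustration of the elitist selection step in evolve() above: the
# population is sorted ascending by fitness, so list.pop() repeatedly yields
# the current fittest member. Fitness values are invented.
population = [('p1', -3.0), ('p2', -0.5), ('p3', -1.2)]
population = sorted(population, key=lambda x: x[1])
best = population.pop()       # ('p2', -0.5), highest fitness
second = population.pop()     # ('p3', -1.2)
print(best, second)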
Popularity\nfrom data.questions import Quest\nfrom data.users import User\nfrom forms.point_user import PointForm\nfrom forms.add_question import AddQuest\nfrom forms.answer_quest import AnswerQuest\nfrom forms.login import LoginForm\nfrom forms.post import AddPost\nfrom forms.recovery import RecoveryForm, Conf, Finish\nfrom forms.register import RegisterForm, Confirmation\nfrom post import mail\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = open('protect.moona', 'r', encoding='utf-8').read().split()[2]\nlogging.basicConfig(filename='main.log')\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nhelp_arg = False\nhelp_arg_2 = False\nsend_msg = False\nsecret_code = None\nphoto = None\nuser_email = \"\"\n\n\ndef remove_java():\n global help_arg\n os.remove(help_arg)\n\n\ndef norm_data(datatime, date_or_time, r=False):\n if date_or_time == 'date':\n return '.'.join(str(datatime).split()[0].split('-')[::-1])\n elif date_or_time == 'time':\n return ':'.join(str(datatime).split()[1].split(':')[0:2])\n elif date_or_time == 'datetime':\n date = '.'.join(str(datatime).split()[0].split('-')[::-1])\n times = ':'.join(str(datatime).split()[1].split(':')[0:2])\n datatimes = date + ' ' + times if r else times + ' ' + date\n datatimes = datetime\n return datatimes\n\n\ndef save_photo(photo, login, post=False, id_post=None):\n if not post:\n with open(f'static/app_image/users_photo/{login}_logo.png', 'wb') as f:\n photo.save(f)\n return f'static/app_image/users_photo/{login}_logo.png'\n elif post and id_post is not None:\n with open(f'static/app_image/post_photo/{login}_post_{id_post}.png', 'wb') as f:\n photo.save(f)\n return f'static/app_image/post_photo/{login}_post_{id_post}.png'\n\n\ndef secret_key():\n return ''.join([str(randint(0, 9)) for i in range(5)])\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n db_sess = db_session.create_session()\n return db_sess.query(User).get(user_id)\n\n\n@app.route('/')\ndef main_page():\n return render_template('/main/main.html', title='Добро пожаловать')\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if not current_user.is_authenticated:\n redir = request.args.get('redir') if request.args.get('redir') else False\n form = LoginForm()\n if form.validate_on_submit():\n db_sess = db_session.create_session()\n user = db_sess.query(User).filter(User.email == form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n if redir:\n return redirect(f'/{redir}')\n else:\n return redirect('/')\n return render_template('main/login.html',\n message=\"Неправильный логин или пароль\",\n form=form)\n return render_template('main/login.html', title='Авторизация', form=form, message='')\n else:\n return redirect('/')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if not current_user.is_authenticated:\n form = RegisterForm()\n form.simple = True\n if form.validate_on_submit():\n if form.password.data != form.password2.data:\n return render_template('main/register.html', title='Регистрация',\n form=form,\n message=\"Пароли не совпадают\")\n data_session = db_session.create_session()\n if data_session.query(User).filter(User.login == form.login.data).first():\n return render_template('main/register.html', title='Регистрация',\n form=form,\n message=\"Такой пользователь уже есть\")\n if data_session.query(User).filter(User.email == form.email.data).first():\n return render_template('main/register.html', title='Регистрация',\n form=form,\n 
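# The 'datetime' branch of norm_data above ends with 'datatimes = datetime',
# which discards the formatted string and returns the datetime module
# instead. A hedged corrected sketch of that helper:
def norm_data_fixed(value, kind, r=False):
    date_part, time_part = str(value).split()
    date = '.'.join(date_part.split('-')[::-1])
    times = ':'.join(time_part.split(':')[:2])
    if kind == 'date':
        return date
    if kind == 'time':
        return times
    return f'{date} {times}' if r else f'{times} {date}'

print(norm_data_fixed('2024-05-01 09:30:00', 'datetime'))  # 09:30 01.05.2024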
message=\"Такая почта уже есть\")\n if form.photo.data:\n photo = save_photo(form.photo.data, form.login.data)\n else:\n photo = False\n session['ps'] = form.password.data\n return redirect(\n url_for('confirmation', photo=photo, name=form.name.data, surname=form.surname.data,\n login=form.login.data,\n birthday=form.birthday.data, about=form.about.data, email=form.email.data, form=True))\n return render_template('main/register.html', title='Регистрация', form=form, message='')\n else:\n return redirect('/')\n\n\n@app.route('/user/<string:login>', methods=['GET', 'POST'])\ndef profile(login):\n if current_user.is_authenticated and current_user.login == login:\n message = request.args.get('message') if request.args.get('message') else ''\n form = RegisterForm()\n if form.del_photo.data:\n data_session = db_session.create_session()\n user = data_session.query(User).filter(User.id == current_user.id).first()\n os.remove(user.photo)\n user.photo = '/static/img/None_logo.png'\n data_session.commit()\n data_session.close()\n return redirect(f'/user/{login}')\n if request.method == 'GET':\n form.email.data = current_user.email\n form.name.data = current_user.name\n form.surname.data = current_user.surname\n form.birthday.data = current_user.birthday\n form.about.data = current_user.about\n form.photo.data = current_user.photo if current_user.photo and 'None' not in current_user.photo else None\n if form.submit2.data:\n data_session = db_session.create_session()\n user = data_session.query(User).filter(User.id == current_user.id).first()\n if user:\n if form.photo.data != current_user.photo:\n if form.photo.data:\n user.photo = save_photo(form.photo.data, login)\n user.name = form.name.data\n user.surname = form.surname.data\n user.birthday = form.birthday.data\n user.about = form.about.data\n data_session.commit()\n data_session.close()\n if form.email.data != current_user.email:\n if data_session.query(User).filter(User.email == form.email.data).first():\n return redirect(f'/user/{login}?message=Такая почта уже есть')\n session['ps'] = None\n return redirect(\n url_for('confirmation', email_conf=True, email=form.email.data, form=True)\n )\n return redirect(f'/user/{login}')\n else:\n abort(404)\n return render_template('main/profile.html', title='Профиль', form=form, message=message)\n elif current_user.is_authenticated and current_user.login != login:\n pass\n else:\n return redirect('/login')\n\n\n@app.route('/confirmation', methods=['GET', 'POST'])\ndef confirmation():\n if request.args.get('form'):\n app_school = request.args.get('app_school') if request.args.get('app_school') else False\n email_conf = request.args.get('email_conf') if request.args.get('email_conf') else False\n data_session = db_session.create_session()\n form = RegisterForm(\n name=request.args.get('name'),\n surname=request.args.get('surname'),\n login=request.args.get('login'),\n birthday=request.args.get('birthday'),\n about=request.args.get('about'),\n email=request.args.get('email'),\n password=session['ps']\n )\n session['photo'] = request.args.get('photo')\n if 'send_msg' not in session:\n session['secret_code'] = secret_key()\n mail(f'Ваш секретный код: {session[\"secret_code\"]}', form.email.data, 'Moona Код')\n session['send_msg'] = True\n else:\n if not session['send_msg']:\n if 'no_code' in session:\n if not session['no_code']:\n session['secret_code'] = secret_key()\n mail(f'Ваш секретный код: {session[\"secret_code\"]}', form.email.data, 'Moona Код')\n session['send_msg'] = True\n session['no_code'] = False\n 
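# secret_key() above draws the 5-digit confirmation code from random.randint,
# which is predictable; a hedged drop-in (hypothetical name) using the stdlib
# secrets module instead:
import secrets

def secret_key_strong(n_digits=5):
    return ''.join(secrets.choice('0123456789') for _ in range(n_digits))

print(secret_key_strong())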
else:\n session['secret_code'] = secret_key()\n mail(f'Ваш секретный код: {session[\"secret_code\"]}', form.email.data, 'Moona Код')\n session['send_msg'] = True\n session['send_msg'] = False\n conf = Confirmation()\n if conf.validate_on_submit():\n if str(conf.code_key.data).strip() == str(session['secret_code']).strip():\n if not email_conf:\n if form.photo.data:\n user = User(\n name=form.name.data,\n surname=form.surname.data,\n login=form.login.data,\n birthday=datetime.datetime.strptime(form.birthday.data, \"%Y-%m-%d\").date(),\n about=form.about.data,\n email=form.email.data,\n photo=save_photo(session['photo'], form.login.data),\n role='user'\n )\n else:\n user = User(\n name=form.name.data,\n surname=form.surname.data,\n login=form.login.data,\n birthday=datetime.datetime.strptime(form.birthday.data, \"%Y-%m-%d\").date(),\n about=form.about.data,\n email=form.email.data,\n role='user',\n photo='/static/img/None_logo.png'\n )\n user.set_password(form.password.data)\n data_session.add(user)\n data_session.commit()\n data_session.close()\n session['send_msg'] = False\n if app_school:\n return redirect('/safeappschool/login')\n else:\n return redirect('/login')\n else:\n user = data_session.query(User).filter(User.id == current_user.id).first()\n if user:\n user.email = form.email.data\n data_session.commit()\n data_session.close()\n return redirect(f'/user/{current_user.login}')\n else:\n abort(404)\n else:\n session['no_code'] = True\n if app_school:\n return render_template('safe_app_school/confirmation.html', title='Подтверждение', form=conf,\n message='Коды не совпадают')\n else:\n return render_template('main/confirmation_reg.html', title='Подтверждение', form=conf,\n message='Коды не совпадают')\n else:\n if app_school:\n return render_template('safe_app_school/confirmation.html', title='Подтверждение', form=conf,\n message='')\n else:\n return render_template('main/confirmation_reg.html', title='Подтверждение', form=conf, message='')\n else:\n return redirect('/')\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n path = request.args.get('path')\n logout_user()\n if not path:\n return redirect(\"/\")\n else:\n return redirect(f'/{path}')\n\n\n@app.route('/safeappschool')\ndef safe_app_school():\n return redirect('/safeappschool/main')\n\n\n@app.route('/safeappschool/main', methods=['GET', 'POST'])\ndef safe_app_school_main():\n if current_user.is_authenticated:\n return render_template('safe_app_school/main.html', title='SafeAppSchool')\n else:\n return redirect('/safeappschool/login')\n\n\n@app.route('/safeappschool/login', methods=['GET', 'POST'])\ndef safe_app_school_login():\n if current_user.is_authenticated:\n return redirect('/safeappschool/main')\n else:\n form = LoginForm()\n if form.validate_on_submit():\n db_sess = db_session.create_session()\n user = db_sess.query(User).filter(User.email == form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect('/safeappschool/main')\n return render_template('/safe_app_school/login.html',\n message=\"Неправильный логин или пароль\",\n form=form)\n return render_template('/safe_app_school/login.html', title='Вход', form=form, message='')\n\n\n@app.route('/safeappschool/register', methods=['GET', 'POST'])\ndef safe_app_school_register():\n if current_user.is_authenticated:\n return redirect('/safeappschool/main')\n else:\n form = RegisterForm()\n form.simple = True\n if form.validate_on_submit():\n if form.password.data != 
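# The nested send_msg/no_code bookkeeping above can be collapsed into a
# single session guard; a hedged sketch (ensure_code is a hypothetical
# helper) that sends the code once per session and returns it thereafter:
def ensure_code(session, send_mail, make_code):
    if not session.get('code_sent'):
        session['secret_code'] = make_code()
        send_mail(session['secret_code'])
        session['code_sent'] = True
    return session['secret_code']

sent = []
print(ensure_code({}, sent.append, lambda: '12345'), sent)  # 12345 ['12345']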
form.password2.data:\n return render_template('simple/simple_register.html', title='Регистрация',\n form=form,\n message=\"Пароли не совпадают\")\n data_session = db_session.create_session()\n if data_session.query(User).filter(User.login == form.login.data).first():\n return render_template('safe_app_school/register.html', title='Регистрация',\n form=form,\n message=\"Такой пользователь уже есть\")\n if data_session.query(User).filter(User.email == form.email.data).first():\n return render_template('safe_app_school/register.html', title='Регистрация',\n form=form,\n message=\"Такая почта уже есть\")\n if form.photo.data:\n photo = save_photo(form.photo.data, form.login.data)\n else:\n photo = False\n session['ps'] = form.password.data\n return redirect(\n url_for('confirmation', photo=photo, name=form.name.data, surname=form.surname.data,\n login=form.login.data,\n birthday=form.birthday.data, about=form.about.data, email=form.email.data, form=True,\n app_school=True))\n return render_template('safe_app_school/register.html', title='Регистрация', form=form, message='')\n\n\n@app.route('/safeappschool/about')\ndef safe_app_school_about():\n if current_user.is_authenticated:\n return render_template('safe_app_school/about.html')\n else:\n return redirect('/safe_app_school/login')\n\n\n@app.route('/safeappschool/setting', methods=['GET', 'POST'])\ndef safe_app_school_setting():\n if current_user.is_authenticated:\n form = PointForm()\n data_session = db_session.create_session()\n point = data_session.query(UserPoint).filter(UserPoint.user == current_user.id).first()\n if form.validate_on_submit():\n if point:\n point.school_address = form.school_address.data\n point.home_address = form.home_address.data\n else:\n point = UserPoint(\n user=current_user.id,\n home_address=form.home_address.data,\n school_address=form.school_address.data\n )\n data_session.add(point)\n data_session.commit()\n data_session.close()\n return redirect('/safeappschool/main')\n if point:\n form.school_address.data = point.school_address\n form.home_address.data = point.home_address\n return render_template('safe_app_school/setting.html', form=form, message='')\n else:\n return redirect('/safe_app_school/login')\n\n\n@app.route('/safeappschool/go/<string:point>')\ndef safe_app_school_go(point):\n global help_arg\n if current_user.is_authenticated:\n data_session = db_session.create_session()\n address = data_session.query(UserPoint).filter(UserPoint.user == current_user.id).first()\n if address:\n if address.school_address and address.home_address:\n with open('static/js/safe_app_school/mapbasics_templates.js', 'r', encoding='utf-8') as file:\n new_file = file.read().split('<point1>')\n new_file = new_file[\n 0] + f'\\'{str(address.home_address).strip() if point == \"school\" else str(address.school_address).strip()}\\'' \\\n + new_file[1]\n new_file = new_file.split('<point2>')\n new_file = new_file[\n 0] + f'\\'{str(address.school_address).strip() if point == \"school\" else str(address.home_address).strip()}\\'' + \\\n new_file[1]\n with open(f'static/js/safe_app_school/{str(current_user.id)}mapbasics.js', 'w',\n encoding='utf-8') as new_js:\n new_js.write(new_file)\n help_arg = f'static/js/safe_app_school/{str(current_user.id)}mapbasics.js'\n t = Timer(15, remove_java, args=None, kwargs=None)\n t.start()\n if point == 'home':\n return render_template('safe_app_school/route.html', title='Маршрут домой', route='домой',\n path=help_arg)\n elif point == 'school':\n return render_template('safe_app_school/route.html', 
title='Маршрут в школу', route='в школу',\n path=help_arg)\n else:\n return redirect('/safe_app_school/main')\n else:\n return render_template('safe_app_school/route.html', title='Маршрут не указан', route=False)\n else:\n return render_template('safe_app_school/route.html', title='Маршрут не указан', route=False)\n else:\n return redirect('/safe_app_school/login')\n\n\n@app.route('/diary/')\ndef main_diary_page():\n return render_template('diary/main.html', title='moona')\n\n\n@app.route('/diary/edit_profile/<string:logins>', methods=['GET', 'POST'])\ndef edit_profile(logins):\n if current_user.is_authenticated:\n global photo\n global help_arg\n global help_arg_2\n form = RegisterForm()\n session = db_session.create_session()\n ph_f = False\n if 'None_logo' not in current_user.photo:\n photo = current_user.photo\n ph_f = True\n else:\n photo = None\n if form.del_photo.data:\n help_arg = photo\n ph_f = False\n if form.submit2.data:\n user = session.query(User).filter(User.login == logins).first()\n if user.email != form.email.data:\n if session.query(User).filter(User.email == form.email.data).first():\n if not form.photo.data and help_arg:\n help_arg = False\n return render_template('diary/edit_profile.html', title='Редактирование профиля', form=form,\n ph_f=ph_f,\n message=\"Такая почта уже есть\")\n else:\n help_arg = True\n help_arg_2 = form.email.data\n return redirect('/diary/confirmation')\n user.name = form.name.data\n user.surname = form.surname.data\n user.birthday = form.birthday.data\n user.about = form.about.data\n photo = '../../static/img/None_logo.png'\n if not ph_f and form.photo.data:\n photo = save_photo(form.photo.data, logins)\n if help_arg == photo:\n os.remove(help_arg)\n help_arg = False\n photo = '../../static/img/None_logo.png'\n user.photo = photo\n session.commit()\n if user.email == form.email.data:\n return redirect('/diary/profile')\n else:\n help_arg_2 = form.email.data\n help_arg = False\n return redirect('/diary/confirmation')\n if request.method == \"GET\":\n if current_user.login == logins:\n form.email.data = current_user.email\n form.name.data = current_user.name\n form.surname.data = current_user.surname\n form.login.data = logins\n form.birthday.data = current_user.birthday\n form.about.data = current_user.about\n form.password.data = None\n form.password2.data = None\n if not form.photo.data and help_arg:\n help_arg = False\n return render_template('diary/edit_profile.html', title='Редактирование профиля', form=form, message='',\n ph_f=ph_f)\n else:\n return redirect('/diary/login')\n\n\n@app.route('/diary/profile')\ndef diary_profile():\n if current_user.is_authenticated:\n global help_arg_2\n db_sess = db_session.create_session()\n pub_post = db_sess.query(DiaryPost).filter(DiaryPost.author == current_user.id, DiaryPost.public == 1).all()\n pub_post = pub_post[::-1]\n emotion_pub = []\n for i in pub_post:\n emotion = {id: i.id, 'pos_emot': [], 'nig_emot': [], 'link': [], 'like': None, 'is_like': 0,\n 'author': current_user}\n if i.pos_emot:\n emotion['pos_emot'] = i.pos_emot.split()\n else:\n emotion['pos_emot'] = None\n if i.nig_emot:\n emotion['nig_emot'] = i.nig_emot.split()\n else:\n emotion['nig_emot'] = None\n if i.link:\n emotion['link'] = i.link.split()\n else:\n emotion['link'] = None\n like = db_sess.query(Like).filter(Like.post == i.id).all()\n if like:\n emotion['like'] = len(like)\n if db_sess.query(Like).filter(Like.post == i.id, Like.user == current_user.id).first():\n emotion['is_like'] = 1\n emotion_pub.append(emotion)\n message 
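`safe_app_school_go` above builds a per-user JS file by splitting a template on `<point1>`/`<point2>` markers and concatenating quoted addresses, then schedules a 15-second `Timer` to delete it. A sketch of the same substitution done with `str.replace` plus `json.dumps` for quoting; the helper name is hypothetical and the placeholder names are taken from the template shown above:

```python
# A sketch of the placeholder substitution with str.replace and json.dumps
# (which handles quoting/escaping); hypothetical helper name.
import json

def render_map_js(template: str, start: str, destination: str) -> str:
    return (template
            .replace('<point1>', json.dumps(start.strip()))
            .replace('<point2>', json.dumps(destination.strip())))

tpl = "var a = <point1>; var b = <point2>;"
print(render_map_js(tpl, "Home street 1", "School street 2"))
# var a = "Home street 1"; var b = "School street 2";
```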
= 'Ваша почта успешно изменена!' if help_arg_2 == 'EditEmail' else ''\n if help_arg_2:\n help_arg_2 = False\n return render_template('diary/profile.html', title='Профиль', pub_post=pub_post, emotion_pub=emotion_pub,\n message=message)\n else:\n return redirect('/diary/login')\n\n\n@app.route('/diary/new_like/<int:user_id>/<int:post_id>/<string:ret_href>')\ndef new_like(user_id, post_id, ret_href):\n if current_user.is_authenticated:\n session = db_session.create_session()\n find = session.query(Like).filter(Like.post == post_id, Like.user == user_id).first()\n if find:\n if (find.date - datetime.datetime.now()).days <= 30:\n pop = session.query(Popularity).filter(Popularity.post == post_id).first()\n pop.popularity = 10 * sum(1 if (i.date - datetime.datetime.now()).days <= 30 else 0 for i in\n session.query(Like).filter(Like.post == post_id).all()) - 10\n if not pop.popularity:\n session.delete(pop)\n session.delete(find)\n session.commit()\n if ret_href != 'main':\n return redirect(f\"/diary/{ret_href}\")\n else:\n return redirect('/diary/')\n else:\n popular = session.query(Popularity).filter(Popularity.post == post_id).first()\n if not popular:\n pop = Popularity()\n pop.post = post_id\n pop.popularity = 10\n pop.edit_date = datetime.datetime.now()\n session.add(pop)\n else:\n popular.popularity += 10\n like = Like()\n like.user = user_id\n like.post = post_id\n like.date = datetime.datetime.now()\n session.add(like)\n session.commit()\n if ret_href != 'main':\n return redirect(f\"/diary/{ret_href}\")\n else:\n return redirect('/diary/')\n else:\n return redirect('/diary/')\n\n\n@app.route('/diary/publications', methods=['GET', 'POST'])\ndef publications():\n session = db_session.create_session()\n fresh_posts_betta = session.query(DiaryPost).filter(DiaryPost.public == 1).all()[::-1]\n day, posts = 7, 20\n fresh_posts = []\n for i in fresh_posts_betta:\n copy_pos = fresh_posts_betta[::]\n if abs((i.date - datetime.datetime.now()).days) <= day:\n fresh_posts.append(copy_pos.pop(copy_pos.index(i)))\n while len(fresh_posts) < posts < len(fresh_posts) + len(fresh_posts_betta):\n copy_pos = fresh_posts_betta[::]\n day += 1\n posts -= 5\n for i in fresh_posts_betta:\n if abs((i.date - datetime.datetime.now()).days) <= day:\n fresh_posts.append(copy_pos.pop(copy_pos.index(i)))\n emotion_fresh = []\n if fresh_posts:\n for i in fresh_posts:\n emotion = {id: i.id, 'pos_emot': [], 'nig_emot': [], 'link': [],\n 'author': session.query(User).filter(User.id == i.author).first(), 'like': None, 'is_like': 0}\n if i.pos_emot:\n emotion['pos_emot'] = i.pos_emot.split()\n else:\n emotion['pos_emot'] = None\n if i.nig_emot:\n emotion['nig_emot'] = i.nig_emot.split()\n else:\n emotion['nig_emot'] = None\n if i.link:\n emotion['link'] = i.link.split()\n else:\n emotion['link'] = None\n like = session.query(Like).filter(Like.post == i.id).all()\n if like:\n emotion['like'] = len(like)\n if current_user.is_authenticated:\n if session.query(Like).filter(Like.post == i.id, Like.user == current_user.id).first():\n emotion['is_like'] = 1\n emotion_fresh.append(emotion)\n pop = sorted(session.query(Popularity).all(), key=lambda x: x.popularity, reverse=True)\n if pop:\n if len(pop) > 50:\n pop = pop[:50]\n pop_post = list(\n map(lambda x: session.query(DiaryPost).filter(DiaryPost.public == 1, DiaryPost.id == x.post).first(), pop))\n emotion_pop = []\n for i in pop_post:\n logging.warning(f'{datetime.datetime.now()}:{i} - i_pop_post')\n emotion = {id: i.id, 'pos_emot': [], 'nig_emot': [], 'link': [],\n 'author': 
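Note that `new_like` above tests recency with `(find.date - datetime.datetime.now()).days <= 30`; for a past timestamp that difference is negative, so the test is true for likes of any age, whereas `publications` wraps the same expression in `abs()`. A sketch of the age check written explicitly (hypothetical helper, now-minus-then order):

```python
# A sketch of an explicit "liked within the last N days" test; with
# (like.date - now).days the value is negative for past dates, so an
# un-abs()'d <= 30 comparison is always true. Hypothetical helper.
import datetime

def is_recent(like_date: datetime.datetime, days: int = 30) -> bool:
    return (datetime.datetime.now() - like_date).days <= days

old = datetime.datetime.now() - datetime.timedelta(days=90)
new = datetime.datetime.now() - datetime.timedelta(days=3)
assert not is_recent(old)
assert is_recent(new)
```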
session.query(User).filter(User.id == i.author).first(), 'like': None,\n 'is_like': 0}\n if i.pos_emot:\n emotion['pos_emot'] = i.pos_emot.split()\n else:\n emotion['pos_emot'] = None\n if i.nig_emot:\n emotion['nig_emot'] = i.nig_emot.split()\n else:\n emotion['nig_emot'] = None\n if i.link:\n emotion['link'] = i.link.split()\n else:\n emotion['link'] = None\n like = session.query(Like).filter(Like.post == i.id).all()\n if like:\n emotion['like'] = len(like)\n if current_user.is_authenticated:\n if session.query(Like).filter(Like.post == i.id, Like.user == current_user.id).first():\n emotion['is_like'] = 1\n emotion_pop.append(emotion)\n else:\n pop_post = []\n emotion_pop = []\n for_you = sorted(session.query(DiaryPost).filter(DiaryPost.public == 1).all(),\n key=lambda x: (len(x.text), 1 if x.photo else 0, -(x.date - datetime.datetime.now()).days))\n if len(for_you) > 50:\n for_you_post = choices(for_you, k=50)\n else:\n for_you_post = set(for_you)\n emotion_for_you = []\n for i in for_you_post:\n emotion = {id: i.id, 'pos_emot': [], 'nig_emot': [], 'link': [],\n 'author': session.query(User).filter(User.id == i.author).first(), 'like': None, 'is_like': 0}\n if i.pos_emot:\n emotion['pos_emot'] = i.pos_emot.split()\n else:\n emotion['pos_emot'] = None\n if i.nig_emot:\n emotion['nig_emot'] = i.nig_emot.split()\n else:\n emotion['nig_emot'] = None\n if i.link:\n emotion['link'] = i.link.split()\n else:\n emotion['link'] = None\n like = session.query(Like).filter(Like.post == i.id).all()\n if like:\n emotion['like'] = len(like)\n if current_user.is_authenticated:\n if session.query(Like).filter(Like.post == i.id, Like.user == current_user.id).first():\n emotion['is_like'] = 1\n emotion_for_you.append(emotion)\n return render_template('diary/publications.html', fresh_post=fresh_posts, emotion_fresh=emotion_fresh,\n title='Публикации',\n pop_post=pop_post, emotion_pop=emotion_pop, for_you_post=for_you_post,\n emotion_for_you=emotion_for_you)\n\n\n@app.route('/diary/answer_quest/<int:id>', methods=['GET', 'POST'])\ndef answer_quest(id):\n if current_user.is_authenticated:\n session = db_session.create_session()\n answer = AnswerQuest()\n quest = session.query(Quest).filter(Quest.id == id).first()\n if request.method == 'GET':\n if session.query(Answer).filter(Answer.id_question == id, Answer.user == current_user.id).first():\n ans_quest = session.query(Answer).filter(Answer.id_question == id,\n Answer.user == current_user.id).first()\n answer.answer.data = ans_quest.answer\n if answer.validate_on_submit():\n if not session.query(Answer).filter(Answer.id_question == id, Answer.user == current_user.id).first():\n answer_user = Answer(id_question=id,\n answer=answer.answer.data,\n user=current_user.id,\n date=datetime.date.today())\n quest.one_used = True\n if len(session.query(Answer).filter(Answer.id_question == id).all()) == len(session.query(User).all()):\n quest.all_used = True\n session.add(answer_user)\n session.commit()\n return redirect('/diary/diary')\n else:\n ans_quest = session.query(Answer).filter(Answer.id_question == id).first()\n ans_quest.answer = answer.answer.data\n session.commit()\n return redirect('/diary/diary')\n return render_template('diary/answer_quest.html', tetle='Ответ на вопрос', form=answer, message='', quest=quest)\n else:\n return redirect('/diary/')\n\n\n@app.route('/diary/delete_quest/<int:id>', methods=['GET', 'POST'])\ndef delete_quest(id):\n if current_user.is_authenticated:\n session = db_session.create_session()\n pos = 
session.query(Quest).filter(Quest.id == id).first()\n if pos:\n session.delete(pos)\n session.commit()\n else:\n abort(404)\n return redirect('/diary/add_question')\n else:\n return redirect('/diary/')\n\n\n@app.route('/diary/add_question', methods=['GET', 'POST'])\ndef add_question():\n if current_user.is_authenticated:\n if current_user.role == 'admin':\n que = AddQuest()\n session = db_session.create_session()\n if que.validate_on_submit():\n if que.quest.data in list(map(lambda x: x.quest, session.query(Quest).all())):\n return render_template('diary/add_question.html', message='Такой вопрос уже есть!',\n title='Добавить вопрос',\n form=que)\n new_que = Quest()\n new_que.quest = que.quest.data.strip()\n session.add(new_que)\n session.commit()\n que.quest.data = ''\n return render_template('diary/add_question.html', message='', title='Добавить вопрос', form=que,\n question=session.query(Quest).all())\n else:\n return redirect('/diary/')\n else:\n return redirect('/diary/')\n\n\n@app.route('/diary/post/<int:id>', methods=['GET', 'POST'])\ndef post_edit(id):\n if current_user.is_authenticated:\n session = db_session.create_session()\n find_post = session.query(DiaryPost).filter(DiaryPost.id == id).first()\n if find_post:\n if find_post.author == current_user.id:\n global photo\n global help_arg\n post_ed = AddPost()\n ph_f = False\n if post_ed.del_photo.data:\n help_arg = photo\n photo = None\n if request.method == \"GET\":\n session = db_session.create_session()\n post_exc = session.query(DiaryPost).filter(DiaryPost.id == id,\n DiaryPost.author == current_user.id).first()\n if post_exc:\n post_ed.name.data = post_exc.name\n post_ed.text.data = post_exc.text\n post_ed.public.data = post_exc.public\n post_ed.pos_emot.data = post_exc.pos_emot\n post_ed.nig_emot.data = post_exc.nig_emot\n post_ed.link.data = post_exc.link\n if post_exc.photo:\n photo = post_exc.photo\n ph_f = True\n else:\n photo = None\n else:\n abort(404)\n if post_ed.validate_on_submit() and not post_ed.del_photo.data:\n session = db_session.create_session()\n post_exc = session.query(DiaryPost).filter(DiaryPost.id == id,\n DiaryPost.author == current_user.id).first()\n if post_exc:\n post_exc.name = post_ed.name.data\n post_exc.text = post_ed.text.data\n post_exc.public = post_ed.public.data\n post_exc.pos_emot = post_ed.pos_emot.data\n post_exc.nig_emot = post_ed.nig_emot.data\n post_exc.link = post_ed.link.data\n if help_arg:\n os.remove(help_arg)\n help_arg = False\n if post_ed.photo.data:\n post_exc.photo = save_photo(post_ed.photo.data, current_user.login, post=True,\n id_post=post_exc.id)\n else:\n post_exc.photo = photo\n check_pop = session.query(Popularity).filter(Popularity.post == post_exc.id).first()\n if not post_ed.public.data and check_pop:\n session.delete(check_pop)\n session.commit()\n return redirect('/diary/diary')\n else:\n abort(404)\n return render_template('diary/post.html', form=post_ed, message='', title='Изменить запись', pht=ph_f)\n else:\n return redirect('/diary/diary')\n else:\n return redirect('/diary/diary')\n else:\n return redirect('/diary/login')\n\n\n@app.route('/diary/post_deleted/<int:id>', methods=['GET', 'POST'])\ndef post_deleted(id):\n if current_user.is_authenticated:\n session = db_session.create_session()\n find_post = session.query(DiaryPost).filter(DiaryPost.id == id).first()\n if find_post:\n if find_post.author == current_user.id or current_user.role == 'admin':\n session = db_session.create_session()\n pos = session.query(DiaryPost).filter(DiaryPost.id == id).first()\n 
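`add_question` above detects duplicates by materializing every `Quest` row into a list and testing membership. A sketch of pushing the uniqueness test into the query instead; this reuses the `Quest` model and SQLAlchemy session from this file, so it is shown as a fragment rather than a standalone script:

```python
# A sketch of an existence check done in SQL rather than in Python;
# relies on the Quest model and db_session defined elsewhere in this file.
def question_exists(session, text: str) -> bool:
    return session.query(Quest).filter(
        Quest.quest == text.strip()
    ).first() is not None
```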
if pos:\n if pos.photo:\n os.remove(pos.photo)\n likes = session.query(Like).filter(Like.post == pos.id).all()\n if likes:\n list(map(lambda i: session.delete(i), likes))\n pop = session.query(Popularity).filter(Popularity.post == pos.id).first()\n if pop:\n session.delete(pop)\n session.delete(pos)\n session.commit()\n else:\n abort(404)\n return redirect('/diary/diary')\n else:\n return redirect('/diary/diary')\n else:\n return redirect('/diary/diary')\n else:\n return redirect('/diary/login')\n\n\n@app.route('/diary/add_post', methods=['GET', 'POST'])\ndef add_post():\n if current_user.is_authenticated:\n pos = AddPost()\n session = db_session.create_session()\n if pos.validate_on_submit():\n try:\n id = session.query(DiaryPost).order_by(DiaryPost.id)[-1].id\n if id:\n id += 1\n else:\n id = -1\n except Exception:\n id = -1\n if pos.photo.data:\n diart_pos = DiaryPost(name=pos.name.data,\n text=pos.text.data,\n author=current_user.id,\n date=datetime.datetime.now(),\n photo=save_photo(pos.photo.data, current_user.login, post=True, id_post=id),\n public=pos.public.data,\n pos_emot=pos.pos_emot.data,\n nig_emot=pos.nig_emot.data,\n link=pos.link.data)\n session.add(diart_pos)\n session.commit()\n return redirect(\"/diary/diary\")\n else:\n diart_pos = DiaryPost(name=pos.name.data,\n text=pos.text.data,\n author=current_user.id,\n date=datetime.datetime.now(),\n public=pos.public.data,\n pos_emot=pos.pos_emot.data,\n nig_emot=pos.nig_emot.data,\n link=pos.link.data)\n session.add(diart_pos)\n session.commit()\n return redirect(\"/diary/diary\")\n return render_template('diary/post.html', form=pos, title='Новый пост', message='')\n else:\n return redirect('/diary/login')\n\n\n@app.route('/diary/diary', methods=['GET', 'POST'])\ndef diary():\n db_sess = db_session.create_session()\n if current_user.is_authenticated:\n posts = db_sess.query(DiaryPost).filter(DiaryPost.author == current_user.id).all()\n posts = posts[::-1]\n pub_post = db_sess.query(DiaryPost).filter(DiaryPost.author == current_user.id, DiaryPost.public == 1).all()\n pub_post = pub_post[::-1]\n emotion_pub = []\n for i in pub_post:\n emotion = {id: i.id, 'pos_emot': [], 'nig_emot': [], 'link': [], 'like': None, 'is_like': 0}\n if i.pos_emot:\n emotion['pos_emot'] = i.pos_emot.split()\n else:\n emotion['pos_emot'] = None\n if i.nig_emot:\n emotion['nig_emot'] = i.nig_emot.split()\n else:\n emotion['nig_emot'] = None\n if i.link:\n emotion['link'] = i.link.split()\n else:\n emotion['link'] = None\n like = db_sess.query(Like).filter(Like.post == i.id).all()\n if like:\n emotion['like'] = len(like)\n if db_sess.query(Like).filter(Like.post == i.id, Like.user == current_user.id).first():\n emotion['is_like'] = 1\n emotion_pub.append(emotion)\n lis_emotion = []\n for i in posts:\n emotion = {id: i.id, 'pos_emot': [], 'nig_emot': [], 'link': []}\n if i.pos_emot:\n emotion['pos_emot'] = i.pos_emot.split()\n else:\n emotion['pos_emot'] = None\n if i.nig_emot:\n emotion['nig_emot'] = i.nig_emot.split()\n else:\n emotion['nig_emot'] = None\n if i.link:\n emotion['link'] = i.link.split()\n else:\n emotion['link'] = None\n lis_emotion.append(emotion)\n quest = db_sess.query(Answer).filter(Answer.user == current_user.id).all()\n try:\n days_reg = current_user.data_reg - datetime.date.today()\n days_reg = abs(days_reg.days) + 1\n if quest:\n post_quest = db_sess.query(Quest).filter(Quest.id.in_([i.id_question for i in quest])).all()\n else:\n post_quest = []\n max_quests = len(db_sess.query(Quest).all())\n while len(post_quest) < days_reg 
and max_quests > len(post_quest):\n post_quest.append(\n db_sess.query(Quest).filter(Quest.id.notin_([i.id for i in post_quest])).first())\n ans = []\n for i in post_quest:\n if i is not None:\n ans_id = db_sess.query(Answer).filter(\n Answer.id_question == i.id, Answer.user == current_user.id).first()\n if ans_id is not None:\n ans.append(ans_id)\n post_quest = post_quest[::-1]\n ans = ans[::-1]\n ans2 = {}\n for i in ans:\n ans2[i.id_question] = i\n except Exception as e:\n ans2 = []\n else:\n posts = None\n post_quest = None\n ans2 = None\n lis_emotion = None\n emotion_pub = None\n pub_post = None\n return render_template('diary/diary.html', title='Дневник', my_post=posts, message='', question=post_quest,\n ans=ans2, emotion=lis_emotion, emotion_pub=emotion_pub, pub_post=pub_post)\n\n\n@app.route('/diary/logout')\n@login_required\ndef diary_logout():\n logout_user()\n return redirect(\"/diary/\")\n\n\n@app.route('/diary/about_us')\ndef about():\n return render_template('diary/about.html', title='О нас')\n\n\n@app.route('/school_app_check_auth', methods=['POST'])\ndef check_auth():\n req = request.json\n email = req['login']\n password = req['password']\n session = db_session.create_session()\n user = session.query(User).filter(User.email == email).first()\n if user:\n if user.check_password(password) or user.check_hash_password(password):\n return make_response(jsonify({\n 'key': open('key.txt', 'r', encoding='utf-8').read(),\n 'name': user.name,\n 'surname': user.surname,\n 'login': user.login,\n 'hash': user.password\n }), 200)\n else:\n return abort(403)\n else:\n return abort(404)\n\n\n@app.route('/simple/can_close')\ndef can_close():\n return render_template('simple/simple_can_close.html', title='Можете закрыть страницу')\n\n\n@app.route('/simple/register', methods=['GET', 'POST'])\ndef school_reg():\n global help_arg\n global photo\n form = RegisterForm()\n form.simple = True\n if form.validate_on_submit():\n if form.password.data != form.password2.data:\n return render_template('simple/simple_register.html', title='Регистрация',\n form=form,\n message=\"Пароли не совпадают\")\n session = db_session.create_session()\n if session.query(User).filter(User.login == form.login.data).first():\n return render_template('simple/simple_register.html', title='Регистрация',\n form=form,\n message=\"Такой пользователь уже есть\")\n if session.query(User).filter(User.email == form.email.data).first():\n return render_template('simple/simple_register.html', title='Регистрация',\n form=form,\n message=\"Такая почта уже есть\")\n help_arg = form\n if form.photo.data:\n photo = save_photo(form.photo.data, form.login.data)\n return redirect('/diary/confirmation')\n return render_template('simple/simple_register.html', title='Регистрация', form=form, message='')\n\n\ndef main():\n db_session.global_init(\"db/moona_data.db\")\n try:\n serve(app, host='0.0.0.0', port=5000)\n except Exception as error:\n logging.warning(f'{datetime.datetime.now()}:{error}')\n # после запуска переходите по ссылке http://127.0.0.1:5000/ в вашем браузере\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AndreiDuvakin/MoonaProjectDevelopment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":47701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70714606345","text":"def char(s):\r\n dic = {}\r\n for n in s:\r\n keys = dic.keys()\r\n if n in keys:\r\n dic[n] += 1\r\n else:\r\n dic[n] = 1\r\n return 
dic\r\na=input(\"string\")\r\nprint(char(a))","repo_name":"arpandhakal/python_powerworkshop","sub_path":"jan 19/string/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73225525705","text":"import firecrest as f7t\nimport os\n\n\n# Get the values from the env or set them directly in your file\nCLIENT_ID = os.environ.get(\"FIRECREST_CLIENT_ID\")\nCLIENT_SECRET = os.environ.get(\"FIRECREST_CLIENT_SECRET\")\nAUTH_TOKEN_URL = os.environ.get(\"AUTH_TOKEN_URL\")\nFIRECREST_URL = os.environ.get(\"FIRECREST_URL\")\n\n# Setup the auth object\nauth = f7t.ClientCredentialsAuth(\n CLIENT_ID, CLIENT_SECRET, AUTH_TOKEN_URL\n)\n\n# Setup the client object\nclient = f7t.Firecrest(\n firecrest_url=FIRECREST_URL,\n authorization=auth\n)\n\n# This call will only start the transfer of the file to the staging area\ndown_obj = client.external_download(\"daint\", \"/scratch/snx3000/eirinik/a_file.txt\")\n\nprint(type(down_obj))\n\n# You can follow the progress of the transfer through the status property\nprint(down_obj.status)\n\n# As soon as down_obj.status is 117 we can proceed with the download to a local file\ndown_obj.finish_download(\"my_local_file\")\n\nprint(down_obj.status)\n\n# You can get directly the link in the staging area and finish the download in your prefered way.\nprint(\"Direct link:\", down_obj.object_storage_link)\n\n# You can download the file as many times as we want from the staging area.\n# After you finish, you should invalidate the link.\ndown_obj.invalidate_object_storage_link()\n\n","repo_name":"eth-cscs/firecrest-training-2023","sub_path":"firecrest_demo/examples/pyfirecrest_storage.py","file_name":"pyfirecrest_storage.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"10280539188","text":"import os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\" # ignore TF log\nimport sys\nimport random\nimport copy\nimport os.path as osp\nimport glog as log\nimport json\nimport math\nimport time\nimport argparse\nfrom collections import defaultdict, Counter\nfrom itertools import combinations\nimport itertools\n\nimport numpy as np\nimport torch\nimport transformers\n\n\nfrom tree_utils import *\nfrom RL_env import EntailmentTreeEnv, State, Action\n\nfrom StepScorer import load_step_scorer, predict_step_scorer, StepScorer\n\n\nclass Verifier:\n def __init__(self, step_scorer = None, entail_scorer = None, bleurt_scorer= None,\n P_score_type = None, H_score_type = None, merge_strategy = None):\n \n self.step_scorer = step_scorer\n self.entail_scorer = entail_scorer\n self.bleurt_scorer = bleurt_scorer\n\n self.P_score_type = P_score_type\n self.H_score_type = H_score_type\n self.merge_strategy = merge_strategy\n\n\n def __call__(self):\n pass\n \n def verify_a_state(self, state, return_dict = False):\n highest_conclusions = state.get_pred_highest_conclusions()\n\n verifer_info = {}\n\n # ----- score of P -----\n if len(state.P) == 0 or len(highest_conclusions) == 0:\n P_score = 0.0\n else:\n # get step score from step scorer\n steps_index = []\n steps = []\n for si, step in enumerate(state.P):\n if 'step_scorer_score' not in step.keys():\n steps_index.append(si)\n steps.append({'pre_sent': step['pre_sent'], \n 'con_sent': step['con_sent'],})\n\n if steps:\n step_scores = self.step_scorer(steps)\n for score, index in zip(step_scores, steps_index):\n 
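The pyfirecrest example above notes that the local download can proceed "as soon as down_obj.status is 117". A sketch of polling that status with a timeout before calling `finish_download`; the string comparison, poll interval, and timeout are assumptions, not pyfirecrest requirements:

```python
# A sketch of waiting for the staging transfer before the local download,
# assuming "117" marks a finished transfer as the comments above state;
# the 10 s poll interval and 600 s timeout are arbitrary choices.
import time

def wait_and_download(down_obj, target: str, timeout_s: int = 600) -> None:
    deadline = time.time() + timeout_s
    while str(down_obj.status) != '117':
        if time.time() > deadline:
            raise TimeoutError('staging transfer did not finish in time')
        time.sleep(10)
    down_obj.finish_download(target)
```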
state.P[index]['step_scorer_score'] = score\n\n if self.P_score_type in [None, 'mean']:\n P_score = np.mean([step['step_scorer_score'] for step in state.P])\n\n elif self.P_score_type in ['min']:\n P_score = np.min([step['step_scorer_score'] for step in state.P])\n\n elif self.P_score_type in ['tree']:\n sent2score = {}\n for step in state.P: # P in order\n for p in step['pre_sent']:\n if state.sent2id[p].startswith('sent'):\n sent2score[p] = 1.0\n else:\n assert p in sent2score\n sent2score[step['con_sent']] = min(step['step_scorer_score'] , np.min([sent2score[p] for p in step['pre_sent']]))\n \n P_score = np.max([sent2score[con] for con in highest_conclusions])\n \n else:\n raise NotImplementedError\n\n # ----- score of H -----\n if len(state.P) == 0 or len(highest_conclusions) == 0:\n H_score = 0.0\n else:\n \n # score each intermeidate conclusion\n if self.H_score_type in [None, 'bleurt', 'bleurt+step_scorer']:\n steps_index = []\n bleurt_inputs = []\n for si, step in enumerate(state.P):\n if 'H_bleurt_score' not in step.keys():\n steps_index.append(si)\n bleurt_inputs.append(step['con_sent'])\n \n if bleurt_inputs:\n scores = self.bleurt_scorer.score(references = [state.H]*len(bleurt_inputs), \n candidates = bleurt_inputs)\n for score, index in zip(scores, steps_index):\n state.P[index]['H_bleurt_score'] = score\n \n if self.H_score_type in ['step_scorer', 'bleurt+step_scorer']:\n ## we replace the highest_conclusions with H are verifier the step\n steps_index = []\n step_inputs = []\n for si, step in enumerate(state.P):\n if 'H_step_scorer_score' not in step.keys():\n steps_index.append(si)\n step_inputs.append({\n 'pre_sent': step['pre_sent'],\n 'con_sent': state.H,\n })\n \n if step_inputs:\n scores = self.verify_steps(step_inputs)\n for score, index in zip(scores, steps_index):\n state.P[index]['H_step_scorer_score'] = score\n \n # calculate the H score\n if self.H_score_type in [None, 'bleurt']:\n con2score_b = {step['con_sent']:step['H_bleurt_score'] for step in state.P}\n proved_scores = [con2score_b[con] for con in highest_conclusions]\n H_score = np.max(proved_scores)\n\n elif self.H_score_type in ['step_scorer']:\n con2score_s = {step['con_sent']:step['H_step_scorer_score'] for step in state.P}\n proved_scores = [con2score_s[con] for con in highest_conclusions]\n H_score = np.max(proved_scores)\n\n elif self.H_score_type in ['bleurt+step_scorer']:\n \n con2score_b = {step['con_sent']:step['H_bleurt_score'] for step in state.P}\n proved_scores = [con2score_b[con] for con in highest_conclusions]\n H_score_b = np.max(proved_scores)\n\n con2score_s = {step['con_sent']:step['H_step_scorer_score'] for step in state.P}\n proved_scores = [con2score_s[con] for con in highest_conclusions]\n H_score_s = np.max(proved_scores)\n\n H_score = 0.5 * (H_score_b + H_score_s)\n\n verifer_info['H_score_b'] = H_score_b\n verifer_info['H_score_s'] = H_score_s\n\n\n else:\n raise NotImplementedError\n\n\n # ----- score of the whole state -----\n if self.merge_strategy in ['None', None, 'P+H']:\n state_score = 0.5 * (P_score + H_score)\n\n elif self.merge_strategy in ['P']:\n state_score = P_score\n elif self.merge_strategy in ['H']:\n state_score = H_score\n else:\n raise NotImplementedError\n\n if return_dict:\n verifer_info.update({\n 'state_score':state_score,\n 'P_score':P_score,\n 'H_score':H_score,\n })\n return verifer_info\n else:\n return state_score\n\n def verify_steps(self, steps):\n step_scores = self.step_scorer(steps)\n return step_scores\n\n def verify_get_root_node(self, state):\n 
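The `'tree'` branch of `verify_a_state` above propagates scores bottom-up: a conclusion scores the minimum of its step score and its premises' scores, with leaf `sent*` premises pinned at 1.0. A standalone sketch of that propagation over a toy proof; the dict shapes mirror the `{'pre_sent', 'con_sent', 'step_scorer_score'}` records in `state.P`, and the leaf predicate is illustrative:

```python
# A standalone sketch of the 'tree' score propagation above: conclusion
# score = min(step score, premise scores); leaves score 1.0. Steps must be
# listed in topological (proof) order, as state.P is.
def tree_scores(steps, is_leaf):
    sent2score = {}
    for step in steps:
        prem = min(1.0 if is_leaf(p) else sent2score[p]
                   for p in step['pre_sent'])
        sent2score[step['con_sent']] = min(step['step_scorer_score'], prem)
    return sent2score

steps = [
    {'pre_sent': ['s1', 's2'], 'con_sent': 'int1', 'step_scorer_score': 0.9},
    {'pre_sent': ['int1', 's3'], 'con_sent': 'int2', 'step_scorer_score': 0.95},
]
scores = tree_scores(steps, is_leaf=lambda s: not s.startswith('int'))
assert abs(scores['int2'] - 0.9) < 1e-9  # bounded by the weaker first step
```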
highest_conclusions = state.get_pred_highest_conclusions()\n \n root_node = None\n if len(state.P) == 0 or len(highest_conclusions) == 0:\n return root_node\n \n if len(highest_conclusions) == 1:\n root_node = highest_conclusions[0]\n return root_node\n \n \n _ = self.verify_a_state(state)\n \n if self.H_score_type in [None, 'bleurt']:\n con2score_b = {step['con_sent']:step['H_bleurt_score'] for step in state.P}\n proved_scores = [con2score_b[con] for con in highest_conclusions]\n root_node = highest_conclusions[np.argmax(proved_scores)]\n\n elif self.H_score_type in ['step_scorer']:\n con2score_s = {step['con_sent']:step['H_step_scorer_score'] for step in state.P}\n proved_scores = [con2score_s[con] for con in highest_conclusions]\n root_node = highest_conclusions[np.argmax(proved_scores)]\n\n elif self.H_score_type in ['bleurt+step_scorer']:\n\n con2score_b = {step['con_sent']:step['H_bleurt_score'] for step in state.P}\n proved_scores_b = np.array([con2score_b[con] for con in highest_conclusions])\n\n con2score_s = {step['con_sent']:step['H_step_scorer_score'] for step in state.P}\n proved_scores_s = np.array([con2score_s[con] for con in highest_conclusions])\n\n root_node = highest_conclusions[np.argmax(proved_scores_b+proved_scores_s)]\n \n else:\n raise NotImplementedError\n\n return root_node\n","repo_name":"Raising-hrx/FAME","sub_path":"code/Verifier.py","file_name":"Verifier.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"86301746174","text":"from packet import Packet, DataPacket, ControlPacket\nclass X10Event:\n UNIT_ALL = 0\n TYPE_DATA = \"data\"\n TYPE_CONTROL = \"control\"\n\n def __init__(self, packet):\n self.packet = packet\n\n if type(packet) is DataPacket:\n self.type = X10Event.TYPE_DATA\n self.house = packet.house\n self.unit = packet.unit or UNIT_ALL\n self.command = packet.command\n\n elif type(packet) is ControlPacket:\n self.type = X10Event.TYPE_CONTROL\n self.opcode = packet.opcode\n self.data = packet.data\n\n def __str__(self):\n if self.type is X10Event.TYPE_DATA:\n result = \"[D] \" + self.house\n if 1 <= self.unit <= 16:\n result += \" \" + str(self.unit)\n result += \" \" + DataPacket.DESCRIPTIONS[self.command]\n elif self.type is X10Event.TYPE_CONTROL:\n result = \"[C] \"\n if self.opcode == ControlPacket.OPCODE_READY_STATUS:\n result += \"Ready Status: \"\n result += \"READY\" if self.data == ControlPacket.DATA_READY_STATUS_READY else \"NOT READY\"\n elif self.opcode == ControlPacket.OPCODE_ACKNOWLEDGE:\n result += \"Acknowledged\"\n if self.data:\n result += \" (\" + hex(self.data) + \")\"\n return result\n","repo_name":"umbc-hackafe/x10-controller","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21895313755","text":"import typing\n\nimport numpy as np\nimport numpy.typing\nimport petsc4py.PETSc\nimport plum\n\nfrom rbnicsx._backends.proper_orthogonal_decomposition import (\n proper_orthogonal_decomposition_functions as proper_orthogonal_decomposition_functions_super,\n proper_orthogonal_decomposition_functions_block as proper_orthogonal_decomposition_functions_block_super,\n proper_orthogonal_decomposition_tensors as proper_orthogonal_decomposition_tensors_super, real_zero)\nfrom rbnicsx.online.functions_list import FunctionsList\nfrom rbnicsx.online.projection import matrix_action\nfrom 
rbnicsx.online.tensors_list import TensorsList\n\n# We could have used functools.singledispatch rather than plum, but since rbnicsx.online.projection\n# introduces a dependency on plum we also use it here for its better handling in combining docstrings\n# and its easier integration with sympy.\n\n\n@plum.overload\ndef proper_orthogonal_decomposition( # type: ignore[no-any-unimported]\n functions_list: FunctionsList, inner_product: petsc4py.PETSc.Mat, N: int = -1,\n tol: petsc4py.PETSc.RealType = real_zero, normalize: bool = True\n) -> typing.Tuple[\n np.typing.NDArray[petsc4py.PETSc.RealType], FunctionsList, typing.List[petsc4py.PETSc.Vec]\n]:\n \"\"\"\n Compute the proper orthogonal decomposition of a set of online snapshots.\n\n Parameters\n ----------\n functions_list\n Collected snapshots.\n inner_product\n Online matrix which defines the inner product. The resulting modes will be orthonormal\n w.r.t. this inner product.\n N\n Maximum number of modes to be computed. If not provided, it will be set to the number of collected snapshots.\n tol\n Tolerance on the retained energy. If not provided, it will be set to zero.\n normalize : bool, optional\n If true (default), the modes are scaled to unit norm.\n\n Returns\n -------\n :\n A tuple containing:\n 1. Eigenvalues of the correlation matrix, largest first. All computed eigenvalues are returned.\n 2. Retained modes from the snapshots. Only the first few modes are returned, till either the\n maximum number N is reached or the tolerance on the retained energy is fulfilled.\n 3. Eigenvectors of the correlation matrix. Only the first few eigenvectors are returned, till\n either the maximum number N is reached or the tolerance on the retained energy is fulfilled.\n \"\"\"\n compute_inner_product = matrix_action(inner_product)\n\n return proper_orthogonal_decomposition_functions_super( # type: ignore[return-value]\n functions_list, compute_inner_product, _scale_online_vector, N, tol, normalize)\n\n\n@plum.overload\ndef proper_orthogonal_decomposition( # type: ignore[no-any-unimported] # noqa: F811\n tensors_list: TensorsList, N: int = -1, tol: petsc4py.PETSc.RealType = real_zero, normalize: bool = True\n) -> typing.Tuple[\n np.typing.NDArray[petsc4py.PETSc.RealType], TensorsList, typing.List[petsc4py.PETSc.Vec]\n]:\n \"\"\"\n Compute the proper orthogonal decomposition of a set of online tensors.\n\n Parameters\n ----------\n tensors_list\n Collected tensors.\n N\n Maximum number of modes to be computed. If not provided, it will be set to the number of collected tensors.\n tol\n Tolerance on the retained energy. If not provided, it will be set to zero.\n normalize\n If true (default), the modes are scaled to unit norm.\n\n Returns\n -------\n :\n A tuple containing:\n 1. Eigenvalues of the correlation matrix, largest first. All computed eigenvalues are returned.\n 2. Retained modes from the tensors. Only the first few modes are returned, till either the\n maximum number N is reached or the tolerance on the retained energy is fulfilled.\n 3. Eigenvectors of the correlation matrix. 
Only the first few eigenvectors are returned, till\n either the maximum number N is reached or the tolerance on the retained energy is fulfilled.\n \"\"\"\n return proper_orthogonal_decomposition_tensors_super(tensors_list, N, tol, normalize) # type: ignore[return-value]\n\n\n@plum.dispatch\ndef proper_orthogonal_decomposition( # type: ignore[no-untyped-def] # noqa: ANN201, F811\n *args, **kwargs # noqa: ANN002, ANN003\n):\n \"\"\"Compute the proper orthogonal decomposition of a set of online snapshots or tensors.\"\"\"\n raise NotImplementedError(\"The abstract case has not been implemented\") # pragma: no cover\n\n\ndef proper_orthogonal_decomposition_block( # type: ignore[no-any-unimported]\n functions_lists: typing.Sequence[FunctionsList], inner_products: typing.List[petsc4py.PETSc.Mat],\n N: typing.Union[int, typing.List[int]] = -1,\n tol: typing.Union[petsc4py.PETSc.RealType, typing.List[petsc4py.PETSc.RealType]] = real_zero,\n normalize: bool = True\n) -> typing.Tuple[\n typing.List[np.typing.NDArray[petsc4py.PETSc.RealType]], typing.List[FunctionsList],\n typing.List[typing.List[petsc4py.PETSc.Vec]]\n]:\n \"\"\"\n Compute the proper orthogonal decomposition of a set of online snapshots, each made of several blocks.\n\n Parameters\n ----------\n functions_lists\n Collected snapshots. Each snapshot is made of several blocks, defined on possibly different reduced bases.\n The inner FunctionsList contains all snapshots of a single block, while the outer list collects the different\n blocks.\n inner_products\n Online matrices which define the inner products of each block. The resulting modes\n will be orthonormal w.r.t. these inner products.\n N\n Maximum number of modes to be computed. If an integer value is passed then the same maximum number is\n used for each block. To set a different maximum number of modes for each block pass a list of integers.\n If not provided, it will be set to the number of collected snapshots.\n tol\n Tolerance on the retained energy. If a floating point value is passed then the same tolerance is\n used for each block. To set a different tolerance for each block pass a list of floating point numbers.\n If not provided, it will be set to zero.\n normalize\n If true (default), the modes are scaled to unit norm.\n\n Returns\n -------\n :\n A tuple containing:\n 1. Eigenvalues of the correlation matrix, largest first. All computed eigenvalues are returned.\n The outer list collects the eigenvalues of different blocks.\n 2. Retained modes from the snapshots. Only the first few modes are returned, till either the\n maximum number N is reached or the tolerance on the retained energy is fulfilled.\n The outer list collects the retained modes of different blocks.\n 3. Eigenvectors of the correlation matrix. 
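The module above registers two typed overloads with `@plum.overload` and resolves them through a final `@plum.dispatch` stub. A minimal toy of the same pattern, assuming the `plum` package is installed; the types and bodies are illustrative only:

```python
# A minimal toy of the plum overload/dispatch pattern used above,
# assuming plum is installed; the final @plum.dispatch stub only
# collects the typed overloads registered before it.
import plum

@plum.overload
def norm(x: int) -> int:
    return abs(x)

@plum.overload
def norm(x: list) -> int:  # noqa: F811
    return sum(abs(v) for v in x)

@plum.dispatch
def norm(x):  # noqa: F811
    """Resolve to the typed overload matching x."""

assert norm(-3) == 3
assert norm([1, -2]) == 3
```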
Only the first few eigenvectors are returned, till\n either the maximum number N is reached or the tolerance on the retained energy is fulfilled.\n The outer list collects the eigenvectors of different blocks.\n \"\"\"\n compute_inner_products = [matrix_action(inner_product) for inner_product in inner_products]\n\n return proper_orthogonal_decomposition_functions_block_super( # type: ignore[return-value]\n functions_lists, compute_inner_products, _scale_online_vector, N, tol, normalize)\n\n\ndef _scale_online_vector( # type: ignore[no-any-unimported]\n vector: petsc4py.PETSc.Vec, factor: petsc4py.PETSc.RealType\n) -> None:\n \"\"\"Scale an online petsc4py.PETSc.Vec.\"\"\"\n vector *= factor\n","repo_name":"RBniCS/RBniCSx","sub_path":"rbnicsx/online/proper_orthogonal_decomposition.py","file_name":"proper_orthogonal_decomposition.py","file_ext":"py","file_size_in_byte":7569,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"623342425","text":"import socket\nimport logging\nfrom .const import (\n NAME_CMD,\n SENSORS_CMD,\n CONFIG_GET_CMD,\n CMV_NAME_PREFIX,\n PRESET_BOOST,\n PRESET_NIGHT,\n PRESET_COOLING,\n FAN_LOW,\n FAN_MEDIUM,\n FAN_HIGH,\n FAN_HIGHEST,\n MODE_CMDS,\n LED_OFF_CMD,\n LED_ON_CMD,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass HeltyCMV:\n def __init__(self, host: str, port: int) -> None:\n self._host = host\n self._port = port\n self.name = host\n self._id = host.lower()\n self.online = True\n\n @property\n def cmv_id(self) -> str:\n return self._id\n\n async def test_connection(self) -> bool:\n \"\"\"Test connectivity to the Dummy hub is OK.\"\"\"\n cmv_name = await self.get_cmv_name()\n if not cmv_name:\n return False\n return True\n\n def _execute_cmv_cmd(self, cmd):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((self._host, self._port))\n s.sendall(cmd)\n data = s.recv(1024)\n s.close()\n return data.decode('ASCII').strip()\n\n async def get_cmv_name(self):\n try:\n return self._execute_cmv_cmd(NAME_CMD).removeprefix(CMV_NAME_PREFIX).strip()\n except Exception as e:\n _LOGGER.warning(e)\n return None\n\n async def get_cmv_indoor_air_temperature(self):\n try:\n indoor_air_temp = None\n data = self._execute_cmv_cmd(SENSORS_CMD).strip().split(',')\n if data[0] == \"VMGI\":\n indoor_air_temp = float(int(data[1]) / 10)\n return indoor_air_temp\n except Exception as e:\n _LOGGER.warning(e)\n return None\n\n async def get_cmv_outdoor_air_temperature(self):\n try:\n outdoor_air_temp = None\n data = self._execute_cmv_cmd(SENSORS_CMD).strip().split(',')\n if data[0] == \"VMGI\":\n outdoor_air_temp = float(int(data[2]) / 10)\n return outdoor_air_temp\n except Exception as e:\n _LOGGER.warning(e)\n return None\n\n async def get_cmv_indoor_humidity(self):\n try:\n indoor_air_humidity = None\n data = self._execute_cmv_cmd(SENSORS_CMD).strip().split(',')\n if data[0] == \"VMGI\":\n indoor_air_humidity = float(int(data[3]) / 10)\n return indoor_air_humidity\n except Exception as e:\n _LOGGER.warning(e)\n return None\n\n async def get_cmv_op_status(self):\n try:\n op_state_int = None\n data = self._execute_cmv_cmd(CONFIG_GET_CMD).strip().split(',')\n if data[0] == \"VMGO\":\n op_state_int = int(data[1])\n if op_state_int == 1:\n return {\"preset\": None, \"fan_mode\": FAN_LOW}\n elif op_state_int == 2:\n return {\"preset\": None, \"fan_mode\": FAN_MEDIUM}\n elif op_state_int == 3:\n return {\"preset\": None, \"fan_mode\": FAN_HIGH}\n elif op_state_int == 4:\n return {\"preset\": None, \"fan_mode\": 
FAN_HIGHEST}\n elif op_state_int == 5:\n return {\"preset\": PRESET_BOOST, \"fan_mode\": None}\n elif op_state_int == 6:\n return {\"preset\": PRESET_NIGHT, \"fan_mode\": None}\n elif op_state_int == 7:\n return {\"preset\": PRESET_COOLING, \"fan_mode\": None}\n else:\n return None\n except Exception as e:\n _LOGGER.warning(e)\n return None\n\n async def set_cmv_mode(self, mode):\n exec_result = self._execute_cmv_cmd(MODE_CMDS.get(mode, NAME_CMD))\n if exec_result == \"OK\":\n return True\n\n async def are_cmv_leds_on(self):\n try:\n led_state_int = None\n data = self._execute_cmv_cmd(CONFIG_GET_CMD).strip().split(',')\n if data[0] == \"VMGO\":\n led_state_int = int(data[2])\n if led_state_int == 10:\n return True\n elif led_state_int == 0:\n return False\n else:\n return None\n except Exception as e:\n _LOGGER.warning(e)\n return None\n\n async def turn_cmv_leds_off(self):\n exec_result = self._execute_cmv_cmd(LED_OFF_CMD)\n if exec_result == \"OK\":\n return True\n\n async def turn_cmv_leds_on(self):\n exec_result = self._execute_cmv_cmd(LED_ON_CMD)\n if exec_result == \"OK\":\n return True\n","repo_name":"MatteoManzoni/helty-cmv-integration-ha","sub_path":"custom_components/heltycmv/cmv.py","file_name":"cmv.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"70160118026","text":"# coding=utf-8\n\nimport os\nimport pickle\nfrom tqdm import tqdm\nfrom preprocess.utils import *\nfrom detector.model import p_net, r_net\nfrom detector.mtcnn_detector import MtCnnDetector\nfrom config import r_net_size, o_net_size, \\\n min_face, p_net_stride, face_thresholds\n\n\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nassert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n\ndef gen_hard_example(size, gray_flag, data_dir, model_paths):\n \"\"\"\n generate hard example for next net\n :return:\n \"\"\"\n channel = 1 if gray_flag else 3\n # models\n\n if size == r_net_size:\n net = 'r_net'\n save_size = r_net_size\n elif size == o_net_size:\n net = 'o_net'\n save_size = o_net_size\n else:\n return\n # images path\n image_dir = os.path.join(data_dir, 'WIDER_train/images/')\n output_dir = os.path.join(data_dir, net)\n detectors = [None, None, None]\n net_p = p_net(channel)\n net_p.load_weights(model_paths[0])\n detectors[0] = net_p\n if size == o_net_size:\n net_r = r_net(channel)\n net_r.load_weights(model_paths[1])\n detectors[1] = net_r\n print(\"r_net loaded!\")\n wider_face_file = os.path.join(data_dir, 'wider_face_train_bbx_gt.txt')\n data = read_annotations(image_dir, wider_face_file)\n mtcnn = MtCnnDetector(detectors, min_face, p_net_stride, face_thresholds)\n save_detects_file = os.path.join(output_dir, net + '_detections.pkl')\n if not os.path.exists(save_detects_file):\n print('loading data to dataset')\n loaded_dataset = load_data_to_dataset(data['images'], channel)\n print('starting to detect')\n detect_result, _ = mtcnn.detect_face(loaded_dataset) # fixme Not only 3000\n print('detect over')\n with open(save_detects_file, 'wb') as f:\n pickle.dump(detect_result, f, 1)\n print('start to generate hard image')\n save_hard_example(save_size, data, save_detects_file, output_dir)\n\n\ndef save_hard_example(save_size, data, saved_file, output_dir):\n \"\"\"\n crop original image using previous net outputted boxes for next net\n :param save_size:\n :param data:\n :param 
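Each getter in the `HeltyCMV` class above issues one TCP command and splits a `VMGI,...` CSV reply, dividing tenth-of-a-unit integers by 10. A sketch that decodes all three sensor readings from a single reply instead of three round-trips; shown as a standalone parser for illustration, with the field order taken from the getters above:

```python
# A sketch that parses one "VMGI" sensor reply into all three readings;
# field order (indoor temp, outdoor temp, humidity, in tenths) follows
# the individual getters above.
def parse_vmgi(reply: str):
    fields = reply.strip().split(',')
    if fields[0] != 'VMGI':
        return None
    return {
        'indoor_temp_c': int(fields[1]) / 10.0,
        'outdoor_temp_c': int(fields[2]) / 10.0,
        'indoor_humidity': int(fields[3]) / 10.0,
    }

assert parse_vmgi('VMGI,215,98,475') == {
    'indoor_temp_c': 21.5, 'outdoor_temp_c': 9.8, 'indoor_humidity': 47.5}
```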
saved_file:\n :param output_dir:\n :return:\n \"\"\"\n img_list = data['images']\n gt_boxes_list = data['bboxes']\n num_img = len(img_list)\n if save_size == r_net_size:\n net = 'r_net'\n elif save_size == o_net_size:\n net = 'o_net'\n else:\n return\n\n neg_label_file, pos_label_file, part_label_file = \\\n [output_dir+\"/train_%s_%s.txt\" % (net, i) for i in ['neg', 'pos', 'part']]\n neg_file, pos_file, part_file = \\\n [open(file, 'w') for file in [neg_label_file, pos_label_file, part_label_file]]\n # read detect results\n with open(saved_file, 'rb') as sf:\n detected_box = pickle.load(sf)\n print('num of detected_box is {}, num of images is {}.'.format(\n len(detected_box), num_img))\n neg_idx, pos_idx, part_idx = 0, 0, 0\n proc_idx = 0\n assert len(img_list) == len(detected_box) == len(gt_boxes_list), \"wrong number!\"\n for img_idx, detect_box, gt_box in tqdm(zip(img_list, detected_box, gt_boxes_list)):\n gt_box = np.array(gt_box, dtype=np.float32).reshape(-1, 4)\n proc_idx += 1\n if detect_box.shape[0] == 0:\n continue\n img = cv2.imread(img_idx)\n detect_box = convert_to_square(detect_box)\n detect_box[:, :4] = np.round(detect_box[:, :4])\n for f in [pos_file, part_file, neg_file]:\n f.write(img_idx + '\\n')\n neg_num = 0\n # print(\"proc_idx = {}, gt_box = {}\".format(proc_idx, gt_box))\n for box in detect_box:\n xl, yl, xr, yr, _ = box.astype(int)\n width = xr - xl + 1\n height = yr - yl + 1\n\n # filter too small or exceed boundary\n if width < 20 or height < 20 or xl < 0 or yl < 0 or\\\n xr >= img.shape[1] or yr >= img.shape[0]:\n continue\n iou_value = iou(box, gt_box)\n cropped_img = img[yl:yr+1, xl:xr+1, :]\n resized_img = cv2.resize(cropped_img, (save_size, save_size),\n interpolation=cv2.INTER_LINEAR)\n if np.max(iou_value) < 0.3 and neg_num < 60:\n neg_file.write(\"%d %d %d %d 0\\n\" % (xl, yl, xr, yr))\n neg_idx += 1\n neg_num += 1\n else:\n idx = np.argmax(iou_value)\n corresponding_gt_box = gt_box[idx]\n xl_gt, yl_gt, xr_gt, yr_gt = corresponding_gt_box\n\n offset_xl = (xl_gt - xl) / float(width)\n offset_yl = (yl_gt - yl) / float(height)\n offset_xr = (xr_gt - xr) / float(width)\n offset_yr = (yr_gt - yr) / float(height)\n if np.max(iou_value) >= 0.65:\n pos_file.write('%d %d %d %d 1 %.2f %.2f %.2f %.2f\\n' %\n (xl, yl, xr, yr, offset_xl, offset_yl, offset_xr, offset_yr))\n pos_idx += 1\n elif np.max(iou_value) >= 0.4:\n part_file.write('%d %d %d %d -1 %.2f %.2f %.2f %.2f\\n' %\n (xl, yl, xr, yr, offset_xl, offset_yl, offset_xr, offset_yr))\n part_idx += 1\n if proc_idx >= min(len(detected_box), num_img):\n break\n neg_file.close()\n part_file.close()\n pos_file.close()\n\n\n# def parse_arguments(argv):\n# parser = argparse.ArgumentParser()\n#\n# parser.add_argument('--input_size', type=int,\n# help='The input size for specific net')\n# parser.add_argument('--gray_input', type=bool, default=True)\n# return parser.parse_args(argv)\n#\n#\n# if __name__ == '__main__':\n# main(parse_arguments(sys.argv[1:]))\n","repo_name":"linkboyx/mtcnn-facedetect-tf2","sub_path":"preprocess/gen_hard_example.py","file_name":"gen_hard_example.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8208588380","text":"import numba\nimport numpy as np\nimport torch\n\nfrom mmdet3d.ops.iou3d.iou3d_utils import nms_gpu, nms_normal_gpu\n\n\ndef nms_gpu(boxes, scores, thresh, pre_maxsize=None, post_max_size=None):\n \"\"\"Nms function with gpu implementation.\n\n Args:\n boxes (torch.Tensor): 
Input boxes with the shape of [N, 5]\n ([x1, y1, x2, y2, ry]).\n scores (torch.Tensor): Scores of boxes with the shape of [N].\n thresh (int): Threshold.\n pre_maxsize (int): Max size of boxes before nms. Default: None.\n post_maxsize (int): Max size of boxes after nms. Default: None.\n\n Returns:\n torch.Tensor: Indexes after nms.\n \"\"\"\n order = scores.sort(0, descending=True)[1]\n\n if pre_maxsize is not None:\n order = order[:pre_maxsize]\n boxes = boxes[order].contiguous()\n\n keep = torch.zeros(boxes.size(0), dtype=torch.long)\n num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh, boxes.device.index)\n keep = order[keep[:num_out].cuda(boxes.device)].contiguous()\n if post_max_size is not None:\n keep = keep[:post_max_size]\n return keep\n\n\n@numba.jit(nopython=True)\ndef circle_nms(dets, thresh, post_max_size=83):\n \"\"\"Circular NMS.\n\n An object is only counted as positive if no other center\n with a higher confidence exists within a radius r using a\n bird-eye view distance metric.\n\n Args:\n dets (torch.Tensor): Detection results with the shape of [N, 3].\n thresh (float): Value of threshold.\n post_max_size (int): Max number of prediction to be kept. Defaults\n to 83\n\n Returns:\n torch.Tensor: Indexes of the detections to be kept.\n \"\"\"\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n scores = dets[:, 2]\n order = scores.argsort()[::-1].astype(np.int32) # highest->lowest\n ndets = dets.shape[0]\n suppressed = np.zeros((ndets), dtype=np.int32)\n keep = []\n for _i in range(ndets):\n i = order[_i] # start with highest score box\n if suppressed[\n i] == 1: # if any box have enough iou with this, remove it\n continue\n keep.append(i)\n for _j in range(_i + 1, ndets):\n j = order[_j]\n if suppressed[j] == 1:\n continue\n # calculate center distance between i and j box\n dist = (x1[i] - x1[j])**2 + (y1[i] - y1[j])**2\n\n # ovr = inter / areas[j]\n if dist <= thresh:\n suppressed[j] = 1\n return keep[:post_max_size]\n","repo_name":"wlzhao22/SAFL","sub_path":"mmdet3d/datasets/mmdet3d_utils.py","file_name":"mmdet3d_utils.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32417089956","text":"#!/usr/bin/env python3\n\n\"\"\"\nWrite a program that counts how many salutes are exchanged during a typical\nwalk along a hallway. The hall is represented by a string. For example:\n\"--->-><-><-->-\"\n\nEach hallway string will contain three different types of characters: '>', an\nemployee walking to the right; '<', an employee walking to the left; and '-',\nan empty space. Every employee walks at the same speed either to right or to\nthe left, according to their direction. Whenever two employees cross, each of\nthem salutes the other. They then continue walking until they reach the end,\nfinally leaving the hallway. In the above example, they salute 10 times.\n\nWrite a function answer(s) which takes a string representing employees walking\nalong a hallway and returns the number of times the employees will salute. 
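A toy run of `circle_nms` above, assuming numba is available. Note that `thresh` is compared against the squared center distance in the loop, so `thresh=1.0` suppresses detections within one unit of a stronger one:

```python
# A toy run of circle_nms above; thresh compares against the SQUARED
# bird's-eye-view center distance, so thresh=1.0 covers radius 1.
import numpy as np

dets = np.array([[0.0, 0.0, 0.9],   # kept (highest score)
                 [0.5, 0.0, 0.8],   # dist^2 = 0.25 <= 1.0 -> suppressed
                 [5.0, 5.0, 0.7]],  # far away -> kept
                dtype=np.float32)
keep = circle_nms(dets, thresh=1.0)
assert list(keep) == [0, 2]
```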
s\nwill contain at least 1 and at most 100 characters, each one of -, >, or <.\n\nTest cases\n==========\n\nInputs:\n (string) s = \">----<\"\nOutput:\n (int) 2\n\nInputs:\n (string) s = \"<<>><\"\nOutput:\n (int) 4\n\"\"\"\n\n\ndef answer(s):\n count = 0\n salute = 0\n for c in s:\n if c == '>':\n count +=1\n elif c == '<':\n salute += 2*count\n return salute\n\nprint(answer(\">----<\"))\nprint(answer(\"<<>><\"))\nprint(answer(\"><><\"))\n","repo_name":"antoinemadec/test","sub_path":"python/foobar/en_route_salute/en_route_salute.py","file_name":"en_route_salute.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7063320877","text":"# -*- coding=utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport sys\n\nsys.path.append('..')\nfrom keras.engine import Layer\nimport keras.backend as K\nfrom keras.models import Sequential, Model\nfrom keras import activations\nfrom keras import initializers\nfrom keras import optimizers\nfrom keras.initializers import RandomUniform, TruncatedNormal\nfrom keras.layers import Reshape, Embedding, Dot, \\\n Input, Dense, Conv2D, CuDNNLSTM, Softmax, Lambda, Bidirectional, Concatenate\nfrom keras.optimizers import Adam\nfrom recurrentshop import LSTMCell\nfrom models.model import BasicModel\nfrom layers.Linear import Linear\nimport numpy as np\nfrom collections import namedtuple\nimport tensorflow as tf\nfrom SummarizationUtils.data import Vocab\nfrom SummarizationUtils.batcher import Batcher\nfrom SummarizationUtils.generator import SummarizationGenerator\n\n\n# Given the reference summary as a sequence of tokens, return the input sequence for the decoder,\n# and the target sequence which we will use to calculate loss. \n# The sequence will be truncated if it is longer than max_len. 
\n# The input sequence must start with the start_id and the target sequence must end with\n# the stop_id (but not if it's been truncated).\n#\n# inp = [start_id] + sequence[:]\n# target = sequence[:]\n#\n\n\nclass SummarizationModel(BasicModel):\n def __init__(self, vocab, config, hps):\n super(SummarizationModel, self).__init__(config)\n self.__name = 'pointer_generator_summarizer'\n self.config = config\n self.hps = hps\n\n self.mode = config['mode']\n self.use_coverage = config['use_coverage']\n self.pointer_gen = config['pointer_gen']\n self.embed_trainable = config['train_embed']\n self.embedding_size = config['embed_size']\n self.vsize = config['vocab_size']\n self.rand_unif_init_mag = config['rand_unif_init_mag']\n self.trunc_norm_init_std = config['trunc_norm_init_std']\n self.hidden_units = self.config['hidden_units']\n self.cov_loss_wt = self.config['cov_loss_wt']\n\n # Initializers:\n self.rand_unif_init = RandomUniform(minval=-self.rand_unif_init_mag,\n maxval=self.rand_unif_init_mag,\n seed=123)\n self.trunc_norm_init = TruncatedNormal(stddev=self.trunc_norm_init_std)\n # Optimizers:\n self.adg = optimizers.TFOptimizer(\n K.tf.train.AdagradOptimizer(self.hps.lr, initial_accumulator_value=self.hps.adagrad_init_acc))\n # Layers\n self.Emb = Embedding(self.vsize,\n self.embedding_size,\n weights=config['embed'],\n trainable=self.embed_trainable\n )\n\n # different dictionary for source and target\n\n # Bi-directional lstm encoder, return (output, states)\n # Dimension: 2*hidden_units\n # concatenated forward and backward vectors\n self.Encoder = Bidirectional(CuDNNLSTM(self.hidden_units,\n return_state=True,\n return_sequences=True,\n kernel_initializer=self.rand_unif_init\n ))\n # Decoder is not bi-directional, perform linear reduction...\n # Dense_layer_dimension=encoder_hidden_units\n\n # Encoder states and output tensors are separated...\n # to initialize decoder\n\n # Decoder cell input: [input, state_h, state_c]\n self.DecoderCell = LSTMCell(self.hidden_units,\n kernel_initializer=self.rand_unif_init,\n bias_initializer=\"zeros\",\n recurrent_initializer=self.rand_unif_init)\n # Decoder output projector\n # to probabilities[word_index]\n self.DecoderOutputProjector = Dense(self.vsize,\n kernel_initializer=self.trunc_norm_init,\n bias_initializer=self.trunc_norm_init,\n activation=None\n )\n self.ConcatenateAxis1 = Concatenate(axis=1)\n self.ConcatenateLastDim = Concatenate(axis=-1)\n self.StackSecondDim = Lambda(lambda x: K.tf.stack(x, axis=1))\n self.SoftmaxforScore = Softmax(axis=-1)\n\n self._batch_size = None\n self._enc_batch = None\n self._enc_lens = None\n self._enc_padding_mask = None\n self._enc_batch_extend_vocab = None\n self._max_art_oovs = None\n self._max_art_oovs_inp = None\n self._dec_batch = None\n self._target_batch = None\n self._dec_padding_mask = None\n self._dec_in_state = None\n self._enc_states = None\n self._dec_out_state = None\n self.p_gens = None\n self.prev_coverage = None\n self.coverage = None\n self._coverage_loss = None\n\n self.check_list = []\n\n if not self.check():\n pass\n pass\n\n def ReduceStates(self, concatenated_h, concatenated_c):\n Linear_Reduce_h = Dense(self.config['hidden_units'],\n activation='relu',\n kernel_initializer=self.trunc_norm_init,\n bias_initializer=self.trunc_norm_init\n )\n Linear_Reduce_c = Dense(self.config['hidden_units'],\n activation='relu',\n kernel_initializer=self.trunc_norm_init,\n bias_initializer=self.trunc_norm_init\n )\n new_concatenated_h = Linear_Reduce_h(concatenated_h)\n new_concatenated_c = 
Linear_Reduce_c(concatenated_c)\n\n return new_concatenated_h, new_concatenated_c\n\n def TargetEmb(self, target_seq):\n segmented_into_words = Lambda(lambda x: [word for word in K.tf.unstack(x, axis=1)])(target_seq)\n # [(batch_size, w1), (batch_size, w2), ..., (batch_size, w_max_text_length)], length = max_text_length\n embedding_list = [self.Emb(x) for x in segmented_into_words]\n return embedding_list\n\n def get_encoder_decoder_inputs(self, source_seq, target_seq):\n # Attention:\n emb_enc_inputs = self.Emb(source_seq) # a tensor\n emb_dec_inputs = self.TargetEmb(target_seq) # list of embeddings\n\n ####\n # emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch)\n # tensor with shape (batch_size, max_enc_steps, emb_size)\n # emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)]\n # list length max_dec_steps containing shape (batch_size, emb_size)\n ####\n return emb_enc_inputs, emb_dec_inputs\n\n def attention_decoder(self,\n decoder_inputs,\n initial_state,\n encoder_states,\n enc_padding_mask,\n Cell,\n initial_state_attention=False,\n pointer_gen=True,\n use_coverage=False,\n prev_coverage=None):\n\n # Requirements:\n # decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n #\n # initial_state: 2D Tensor [batch_size x cell.state_size].\n # for the initialization of decoder states\n # encoder_states: (batchsize, timestep, 2*hiddenunits)\n # [batch_size, attn_length, attn_size].\n #\n # enc_padding_mask: 2D Tensor [batch_size x attn_length] containing 1s and 0s;\n # indicates which of the encoder locations are padding (0) or a real token (1).\n # cell: rnn_cell.RNNCell defining the cell function and size.\n #\n # initial_state_attention:\n # Note that this attention decoder passes each decoder input through a linear layer\n # with the previous step's context vector to get a modified version of the input.\n # If initial_state_attention is False,\n # on the first decoder step the \"previous context vector\" is just a zero vector.\n # If initial_state_attention is True, we use initial_state to (re)calculate the previous step's context vector.\n # We set this to False for train/eval mode (because we call attention_decoder once for all decoder steps)\n # and True for decode mode (because we call attention_decoder once for each decoder step).\n #\n # pointer_gen: boolean. If True, calculate the generation probability p_gen for each decoder step.\n #\n # use_coverage: boolean. If True, use coverage mechanism.\n #\n # prev_coverage:\n # If not None, a tensor with shape (batch_size, attn_length). 
The previous step's coverage vector.\n # This is only not None in decode mode when using coverage.\n\n # NOTE:\n # To initialize a keras CUDNNLSTM layer's state:\n # ##################################################\n # if isinstance(inputs, list):\n # initial_state = inputs[1:]\n # inputs = inputs[0]\n # elif initial_state is not None:\n # pass\n # elif self.stateful:\n # initial_state = self.states\n # else:\n # initial_state = self.get_initial_state(inputs)\n #\n # ##################################################\n attn_size = K.int_shape(encoder_states)[2]\n input_size = K.int_shape(decoder_inputs[0])[1]\n\n encoder_states = Lambda(lambda x: K.expand_dims(x, axis=2))(encoder_states)\n # now : encoder_states.shape = (batch_size,attn_length,1,attention_vec_size)\n attention_vec_size = attn_size\n W_h_shape = (1, 1, attn_size, attention_vec_size)\n Encoder_Feature_Extractor = Conv2D(kernel_size=(W_h_shape[0], W_h_shape[1]),\n filters=W_h_shape[3],\n padding=\"same\",\n data_format=\"channels_last\"\n )\n # W_h = [filter_height, filter_width, in_channels, out_channels]\n encoder_features = Encoder_Feature_Extractor(encoder_states)\n # nn_ops.conv2d(encoder_states, W_h, [1, 1, 1, 1], \"SAME\")\n # shape (batch_size,attn_length, 1 , attention_vec_size)\n if use_coverage:\n w_c = (1, 1, 1, attention_vec_size)\n Coverage_Feature_Extractor = Conv2D(kernel_size=(w_c[0], w_c[1]),\n filters=w_c[3],\n padding=\"same\",\n data_format=\"channels_last\"\n )\n\n if prev_coverage is not None:\n expand_2_3 = Lambda(lambda x: K.expand_dims(K.expand_dims(x, 2), 3))\n prev_coverage = expand_2_3(prev_coverage)\n\n # v: shared vector, attention_vec_size-dim -> 1-dim, calculating\n V = Dense(1,\n use_bias=False,\n kernel_initializer='glorot_uniform') # shape : [attention_vec_size]\n Attn_Dist_and_Encoder_States_to_Context_Vector = Lambda(\n lambda X: attn_dist_and_encoder_states_to_context_vector(X, attn_size))\n Masked_Attention = Lambda(lambda x: masked_attention(x, enc_padding_mask))\n Features_Adder = Lambda(lambda x: sum_and_tanh(x))\n Squeezer_3_2 = Lambda(lambda x: K.squeeze(K.squeeze(x, axis=3), axis=2))\n Expand_Dim_2_2 = Lambda(lambda x: K.expand_dims(K.expand_dims(x, 2), 2))\n Attention_Linear_layer = Linear(attention_vec_size, True)\n # the linear layer used in attention(...),\n # transform decoder_state to decoder_features\n Decoder_Input_to_Cell_Input = Linear(input_size, True)\n Calculate_pgen_Linear_layer = Linear(1, True, activation='sigmoid')\n AttnOutputProjection_Linear_layer = Linear(Cell.output_dim, True)\n Expand_1_1 = Lambda(lambda x: K.expand_dims(K.expand_dims(x, axis=1), axis=1))\n\n def attention(decoder_state, coverage=None):\n # Calculate the context vector and attention distribution from the decoder state.\n # Args:\n # decoder_state: state of the decoder\n # coverage: Optional. Previous timestep's coverage vector, shape (batch_size, attn_len, 1, 1).\n # Returns:\n # context_vector: weighted sum of encoder_states\n # attn_dist: attention distribution\n # coverage: new coverage vector. 
shape (batch_size, attn_len, 1, 1)\n\n decoder_features = Attention_Linear_layer(decoder_state) # shape (batch_size, attention_vec_size)\n decoder_features = Expand_1_1(decoder_features) # reshape to (batch_size, 1, 1, attention_vec_size)\n\n if use_coverage and coverage is not None:\n coverage_features = Coverage_Feature_Extractor(coverage)\n added_features = Features_Adder([encoder_features, decoder_features, coverage_features])\n # added_features: shape (batch_size,attn_length, 1, 1)\n e = Squeezer_3_2(V(added_features))\n # e: shape (batch_size,attn_length)\n # Calculate attention distribution\n attn_dist = Masked_Attention(e)\n # Update coverage vector\n # sum over the input sequence\n\n coverage = Lambda(lambda x: x[0] + Reshape((-1, 1, 1))(x[1]))([coverage, attn_dist])\n else:\n added_features = Features_Adder([encoder_features, decoder_features])\n # added_features: shape (batch_size,attn_length, 1, 1)\n e = Squeezer_3_2(V(added_features))\n attn_dist = Masked_Attention(e)\n if use_coverage: # first step of training\n coverage = Expand_Dim_2_2(attn_dist) # initialize coverage\n\n context_vector = Attn_Dist_and_Encoder_States_to_Context_Vector([attn_dist, encoder_states])\n # context_vector = math_ops.reduce_sum(array_ops.reshape(attn_dist,\n # [batch_size, -1, 1, 1]) * encoder_states,\n # [1, 2]) # shape (batch_size, attn_size).\n # context_vector = array_ops.reshape(context_vector, [-1, attn_size])\n\n return context_vector, attn_dist, coverage\n\n # ####END OF ATTENTION#### #\n\n # Return values:\n outputs = []\n attn_dists = []\n p_gens = []\n # initial_state is a list/ tuple\n state_h, state_c = initial_state[0], initial_state[1]\n coverage_ret = prev_coverage # initialize coverage to None or whatever was passed in\n\n # re-typed to tf.Tensor for backend operations\n context_vector_ret = Lambda(lambda x: K.zeros(shape=(self._batch_size, attn_size)))([])\n # Get a zero-initialized context vector\n if initial_state_attention:\n # Re-calculate the context vector from the previous step\n # so that we can pass it through a linear layer with this step's input\n # to get a modified version of the input\n context_vector_ret, _, coverage_ret = attention(initial_state, coverage_ret)\n # in decode mode, this is what updates the coverage vector\n # otherwise, context_vector & coverage are zero vectors\n for i, inp in enumerate(decoder_inputs):\n transformed_inp = Decoder_Input_to_Cell_Input([inp, context_vector_ret])\n cell_output, state_h, state_c = Cell([transformed_inp, state_h, state_c])\n if i == 0 and initial_state_attention: # always true in decode mode\n context_vector_ret, attn_dist_ret, _ = attention([state_h, state_c], coverage_ret)\n # don't allow coverage to update\n else:\n context_vector_ret, attn_dist_ret, coverage_ret = attention([state_h, state_c], coverage_ret)\n attn_dists.append(attn_dist_ret)\n\n if pointer_gen:\n p_gen = Calculate_pgen_Linear_layer([context_vector_ret, state_h, state_c, transformed_inp])\n p_gens.append(p_gen)\n\n output = AttnOutputProjection_Linear_layer([cell_output, context_vector_ret])\n outputs.append(output)\n\n print('finished adding attention_decoder for each time step!')\n if coverage_ret is not None:\n coverage_ret = Lambda(lambda x: K.reshape(x, [self._batch_size, -1]))(coverage_ret)\n\n return outputs, [state_h, state_c], attn_dists, p_gens, coverage_ret\n\n # ####END OF ATTENTION_DECODER#### #\n\n def _add_decoder(self, inputs):\n # Args:\n # inputs: inputs to the decoder (word embeddings). 
(batch_size, emb_dim)\n # stored in list\n _Cell = self.DecoderCell\n # NOTE: use_coverage is the boolean config flag; self.coverage only holds\n # the coverage tensor produced by the decoder, so it is not a valid switch\n _prev_coverage = self.prev_coverage \\\n if self.mode == 'decode' and self.use_coverage else None\n\n # attention_decoder(inputs, self._dec_in_state, self._enc_states, self._enc_padding_mask, cell,\n # initial_state_attention=(hps.mode==\"decode\"),\n # pointer_gen=hps.pointer_gen, use_coverage=hps.coverage, prev_coverage=prev_coverage)\n outputs, out_state, attn_dists, p_gens, coverage = \\\n self.attention_decoder(decoder_inputs=inputs,\n initial_state=self._dec_in_state,\n encoder_states=self._enc_states,\n enc_padding_mask=self._enc_padding_mask,\n Cell=_Cell,\n initial_state_attention=(self.mode == 'decode'),\n pointer_gen=self.pointer_gen,\n use_coverage=self.use_coverage,\n prev_coverage=_prev_coverage\n )\n return outputs, out_state, attn_dists, p_gens, coverage\n\n # END OF _ADD_DECODER #\n\n def _calc_final_dist(self, vocab_dists, attn_dists):\n WeightMultLayer = Lambda(lambda x: x[0] * x[1])\n SupWeightMultLayer = Lambda(lambda x: (1 - x[0]) * x[1])\n DistPlus = Lambda(lambda x: x[0] + x[1])\n\n vocab_dists = [WeightMultLayer([a, b]) for a, b in zip(self.p_gens, vocab_dists)]\n attn_dists_weighted = [SupWeightMultLayer([a, b]) for a, b in zip(self.p_gens, attn_dists)]\n\n extra_zeros = Lambda(lambda x: K.zeros(shape=(self._batch_size, self._max_art_oovs), dtype='float32'))([])\n extended_vsize = Lambda(lambda x: self.vsize + x)(self._max_art_oovs)\n vocab_dists_extended = [self.ConcatenateAxis1([dist, extra_zeros]) for dist in vocab_dists]\n\n # Project the values in the attention distributions onto the appropriate entries in the final distributions\n # This means that if a_i = 0.1 and the ith encoder word is w,\n # and w has index 500 in the vocabulary, then we add 0.1 onto the 500th entry of the final distribution\n # This is done for each decoder timestep.\n # This is fiddly; we use tf.scatter_nd to do the projection\n shape = [self._batch_size, extended_vsize]\n\n def preparation(x):\n batch_nums = K.tf.range(0, limit=self._batch_size) # shape (batch_size)\n batch_nums = K.tf.expand_dims(batch_nums, 1) # shape (batch_size, 1)\n attn_len = K.tf.shape(self._enc_batch_extend_vocab)[1] # number of states we attend over\n batch_nums = K.tf.tile(batch_nums, multiples=[1, attn_len])\n indices = K.tf.stack((batch_nums, self._enc_batch_extend_vocab), axis=2)\n return indices\n\n indices = Lambda(preparation)([])\n ScatterNdList = [Lambda(\n lambda x: K.tf.scatter_nd(indices, x, shape=shape, name='making_attn_dists_projected_at_step_%d' % _index),\n name='making_attn_dists_projected_at_step_%d' % _index)\n for _index in range(len(attn_dists_weighted))]\n\n attn_dists_projected = [\n ScatterNdList[_index](copy_dist)\n for _index, copy_dist in enumerate(attn_dists_weighted)]\n\n final_dists = [DistPlus([a, b]) for a, b in zip(vocab_dists_extended, attn_dists_projected)]\n\n def _add_epsilon(epsilon=1e-9):\n # return add-epsilon layer\n _AddEpsilon = Lambda(lambda x: x + K.tf.ones_like(x) * epsilon)\n return _AddEpsilon\n\n AddEpsilon = _add_epsilon()\n final_dists = [AddEpsilon(dist) for dist in final_dists]\n\n return final_dists, attn_dists\n\n def setup(self):\n pass\n\n def build(self):\n # Input: text to be summarized\n # source sequence -> encoder input\n self._enc_batch = Input(name='source', shape=(None,), dtype='int32')\n self._enc_lens = Input(name='source_length', shape=(1,), dtype='int32')\n self._enc_padding_mask = Input(name='encoder_padding_mask', shape=(None,), dtype='float32')\n\n if self.pointer_gen:\n 
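# Extra inputs for the pointer (copy) mechanism: the source tokens re-indexed\n # over the per-article extended vocabulary, and the number of in-article OOV\n # words (both assumed to be supplied by the batcher, as in See et al.):\n 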
self._enc_batch_extend_vocab = Input(name='extend_vocab', shape=(None,), dtype='int32')\n # same size within batch\n self._max_art_oovs_inp = Input(name='oovs_in_this_batch', shape=(1,), dtype='int32')\n\n self._max_art_oovs = Lambda(lambda x: x[0][0])(self._max_art_oovs_inp) # 1-dim tensor\n\n # target sequence -> decoder input [<start>, seq[0], seq[1], ...]\n # -> decoder mapping target [seq[0], seq[1], seq[2], ...]\n self._dec_batch = Input(name='decoder_input', shape=(self.config['max_dec_steps'],), dtype='int32')\n self._target_batch = Input(name='target', shape=(self.config['max_dec_steps'],), dtype='int32')\n self._dec_padding_mask = Input(name='decoder_padding_mask',\n shape=(self.config['max_dec_steps'],), dtype='float32')\n if self.mode == 'decode':\n self.prev_coverage = Input(name='prev_coverage', shape=(None,))\n\n self._batch_size = Lambda(lambda x: K.shape(x)[0])(self._enc_batch)\n\n emb_enc_inputs, emb_dec_inputs = self.get_encoder_decoder_inputs(self._enc_batch, self._dec_batch)\n enc_outputs, forward_h, forward_c, backward_h, backward_c = self.Encoder(emb_enc_inputs)\n encoder_states = [self.ConcatenateLastDim([forward_h, backward_h]),\n self.ConcatenateLastDim([forward_c, backward_c])]\n # ATTENTION:\n # return_sequence=True\n # enc_outputs.shape: (batchsize, timestep, 2*hiddenunits)\n # encoder_output & encoder_states dimension: 2*hidden_units\n # encoder_states[0] = concatenate (forward.h and backward.h)\n # encoder_states[1] = concatenate (forward.c and backward.c)\n self._enc_states = enc_outputs\n new_state_h, new_state_c = self.ReduceStates(encoder_states[0], encoder_states[1])\n self._dec_in_state = [new_state_h, new_state_c]\n # reduced_output & reduced_states dimension: hidden_units\n\n decoder_outputs, self._dec_out_state, attn_dists, self.p_gens, self.coverage = \\\n self._add_decoder(emb_dec_inputs)\n\n # decoder_outputs = Lambda(lambda x: K.tf.stack(x, axis=1))(decoder_outputs) ###\n\n vocab_scores = [self.DecoderOutputProjector(output_) for output_ in decoder_outputs]\n vocab_dists = [self.SoftmaxforScore(score_) for score_ in vocab_scores]\n\n if self.pointer_gen:\n final_dists, attn_dists = self._calc_final_dist(vocab_dists=vocab_dists,\n attn_dists=attn_dists)\n else:\n final_dists = vocab_dists\n\n if self.mode == \"decode\":\n assert False, 'Decode mode not implemented'\n pass\n\n stacked_final_dists = Lambda(lambda x: K.tf.stack(x, axis=1), name='stacked_final_dists')(final_dists)\n stacked_attn_dists = Lambda(lambda x: K.tf.stack(x, axis=1), name='stacked_attn_dists')(attn_dists)\n\n if self.use_coverage:\n self.outputs = [stacked_attn_dists, stacked_final_dists]\n else:\n self.outputs = [stacked_final_dists]\n\n if self.pointer_gen:\n model = Model(inputs=[self._enc_batch, self._enc_lens,\n self._enc_padding_mask, self._enc_batch_extend_vocab,\n self._max_art_oovs_inp, self._dec_batch,\n self._target_batch, self._dec_padding_mask],\n outputs=self.outputs)\n else:\n model = Model(inputs=[], outputs=[]) # baseline (non-pointer) graph is not built in this file\n\n self.outputs_shapes = [K.shape(x) for x in self.outputs]\n\n # the coverage loss term is weighted by cov_loss_wt (lambda in See et al., 2017)\n (loss_functions, loss_weights) = ([loss_wrapper(self._dec_padding_mask)[0],\n loss_wrapper(self._dec_padding_mask)[1]],\n [self.cov_loss_wt, 1.]) if self.use_coverage else (\n [loss_wrapper(self._dec_padding_mask)[1]], [1.])\n\n model.compile(optimizer=self.adg,\n loss=loss_functions, loss_weights=loss_weights)\n\n self.model = model\n # #### END OF BUILD ####\n\n\ndef _mask_and_avg(values, padding_mask):\n \"\"\"Applies mask to values then returns overall average (a scalar)\n Args:\n values: a list 
length max_dec_steps containing arrays shape (batch_size).\n padding_mask: tensor shape (batch_size, max_dec_steps) containing 1s and 0s.\n Returns:\n a scalar\n \"\"\"\n dec_lens = K.tf.reduce_sum(padding_mask, axis=1, name='dec_lens') # shape batch_size. float32\n values_per_step = []\n for dec_step, v in enumerate(values):\n values_per_step.append(v * padding_mask[:, dec_step])\n values_per_ex = sum(values_per_step) / dec_lens # shape (batch_size); normalized value for each batch member\n return K.tf.reduce_mean(values_per_ex, name='reduce_mean_in_mask_avg') # overall average\n\n\ndef loss_wrapper(mask):\n def calc_loss_at_timestep_t(range_batch, t, dist_at_t, _target_batch):\n # Return:\n # losses: loss of all samples in a batch at time step t\n targets = K.tf.strided_slice(_target_batch, [0, t], [K.tf.shape(_target_batch)[0], t + 1], shrink_axis_mask=2,\n name='slicing_for_targets_in_calc_loss_at_timestep_t') # shape: (batch_size, )\n indices = K.tf.stack((range_batch, targets), axis=1) # shape (batch_size, 2)\n gold_probs = K.tf.gather_nd(dist_at_t, indices) # shape (batch_size). prob of correct words on this step\n losses = -K.tf.log(gold_probs)\n return losses\n\n def _loss(y_true, y_pred):\n # Params:\n # y_pred : final_dists, distributions of words, shape (batch_size, time_steps, vocab_size) (float)\n # y_true : indices of true words, shape (batch_size, time_steps, ) (int)\n\n y_true = K.tf.cast(y_true[:, :, 0], 'int32', 'cast_to_int_in_loss')\n\n loss_per_step = []\n _batchsize = K.shape(y_pred)[0]\n batch_nums = K.tf.range(0, limit=_batchsize) # shape: (batch_size, )\n for dec_step, dist in enumerate(K.tf.unstack(y_pred, axis=1)):\n losses = calc_loss_at_timestep_t(batch_nums, dec_step, dist, y_true)\n loss_per_step.append(losses)\n _loss_ret = _mask_and_avg(loss_per_step, padding_mask=mask)\n\n return _loss_ret\n\n def _coverage_loss(y_true, y_pred):\n # Params:\n # y_pred : attn_dists, distributions of words, shape (batch_size, time_steps, vocab_size) (float)\n # y_true : indices of true words, shape (batch_size, time_steps, vocab_size ) (int)\n # keras requires y_true and y_pred to be the same shape,\n # thus y_true is repeated vocab_size times on the last dim\n\n _y_pred = K.tf.unstack(y_pred, axis=1, name='unstacking_attn_dists_in_coverage_loss')\n coverage = K.tf.zeros_like(_y_pred[0])\n covlosses = []\n for a in _y_pred:\n covloss = K.tf.reduce_sum(K.tf.minimum(a, coverage), [1]) # calculate the coverage loss for this step\n covlosses.append(covloss)\n coverage += a # update the coverage vector\n _coverage_loss_ret = _mask_and_avg(covlosses, padding_mask=mask)\n return _coverage_loss_ret\n\n return _coverage_loss, _loss\n\n\n# Utility\ndef sum_and_tanh(X):\n # Input: X: list of tensors\n # Return: tanh(X[0] + X[1] + ...)\n tmp = X[0]\n for x in X[1:]:\n tmp += x\n tmp = activations.tanh(tmp)\n return tmp\n\n\ndef attn_dist_and_encoder_states_to_context_vector(X, attn_size):\n # X[0] : attn_dist; X[1] : encoder_states\n reshaped = K.reshape(X[0], (K.shape(X[0])[0],) + (-1, 1, 1)) * X[1]\n con_vec = K.sum(reshaped, [1, 2])\n con_vec = K.reshape(con_vec, (K.shape(X[0])[0], attn_size))\n return con_vec\n\n\ndef masked_attention(e, enc_padding_mask):\n # TODO:\n # epsilon ???\n \"\"\"Take softmax of e then apply enc_padding_mask and re-normalize\"\"\"\n attn_dist = K.softmax(e)\n attn_dist *= enc_padding_mask\n masked_sums = K.sum(attn_dist, axis=1)\n return attn_dist / K.reshape(masked_sums, [-1, 1])\n\n\ndef main():\n ConfigtoFeed = {'mode': 'train',\n 'use_coverage': 
True,\n 'pointer_gen': True,\n 'train_embed': True,\n 'embed_size': 300,\n 'vocab_size': 100000,\n 'rand_unif_init_mag': 0.01,\n 'trunc_norm_init_std': 0.01,\n 'hidden_units': 128,\n 'max_dec_steps': 100,\n 'cov_loss_wt': 1.0,\n 'embed': None\n }\n hps, hps_dict = get_hps()\n configs = {}\n for kys in ConfigtoFeed:\n if kys in hps_dict:\n configs[kys] = hps_dict[kys]\n else:\n configs[kys] = ConfigtoFeed[kys]\n\n MD = SummarizationModel(None, configs, hps)\n MD.build()\n vocab_path = '/path/to/vocab'\n train_data_path = '/path/to/train*'\n valid_data_path = '/path/to/val*'\n single_pass = True\n vocab = Vocab(vocab_path, hps.vocab_size)\n\n train_batcher = Batcher(train_data_path, vocab, hps, single_pass=single_pass)\n TRAIN_GEN = SummarizationGenerator(hps, train_batcher)\n train_generator = TRAIN_GEN.get_batch_generator()\n\n valid_batcher = Batcher(valid_data_path, vocab, hps, single_pass=False)\n VALID_GEN = SummarizationGenerator(hps, valid_batcher)\n valid_generator = VALID_GEN.get_batch_generator()\n\n print(\"TRAINING IN PROGRESS...\")\n\n for i_e in range(100):\n\n history = MD.model.fit_generator(train_generator,\n steps_per_epoch=200,\n epochs=1,\n shuffle=False,\n verbose=0) # ~3200 samples per call\n for x in history.history:\n print('training:', history.history[x])\n\n validation = MD.model.evaluate_generator(valid_generator, steps=100, verbose=0) # ~1600 samples per call\n print('validation:', validation)\n\n\n# Where to find data\ntf.app.flags.DEFINE_string('data_path', '/mnt/E/WORK/DATA/CNN_DM/finished_files/chunked/train*',\n 'Path expression to tf.Example datafiles. Can include wildcards to access multiple datafiles.')\ntf.app.flags.DEFINE_string('vocab_path', '/mnt/E/WORK/DATA/CNN_DM/finished_files/vocab',\n 'Path expression to text vocabulary file.')\n\n# Important settings\ntf.app.flags.DEFINE_string('mode', 'train', 'must be one of train/eval/decode')\ntf.app.flags.DEFINE_boolean('single_pass', False,\n 'For decode mode only. If True, run eval on the full dataset using a fixed checkpoint, i.e. take the current checkpoint, and use it to produce one summary for each example in the dataset, write the summaries to file and then get ROUGE scores for the whole dataset. If False (default), run concurrent decoding, i.e. repeatedly load latest checkpoint, use it to produce summaries for randomly-chosen examples and log the results to screen, indefinitely.')\n\n# Where to save output\ntf.app.flags.DEFINE_string('log_root', '', 'Root directory for all logging.')\ntf.app.flags.DEFINE_string('exp_name', '',\n 'Name for experiment. Logs will be saved in a directory with this name, under log_root.')\n\n# Hyperparameters\ntf.app.flags.DEFINE_integer('hidden_dim', 256, 'dimension of RNN hidden states')\ntf.app.flags.DEFINE_integer('emb_dim', 128, 'dimension of word embeddings')\ntf.app.flags.DEFINE_integer('batch_size', 16, 'minibatch size') # originally 16\ntf.app.flags.DEFINE_integer('max_enc_steps', 400, 'max timesteps of encoder (max source text tokens)')\ntf.app.flags.DEFINE_integer('max_dec_steps', 100, 'max timesteps of decoder (max summary tokens)')\ntf.app.flags.DEFINE_integer('beam_size', 4, 'beam size for beam search decoding.')\ntf.app.flags.DEFINE_integer('min_dec_steps', 35,\n 'Minimum sequence length of generated summary. Applies only for beam search decoding mode')\ntf.app.flags.DEFINE_integer('vocab_size', 50000,\n 'Size of vocabulary. These will be read from the vocabulary file in order. 
If the vocabulary file contains fewer words than this number, or if this number is set to 0, will take all words in the vocabulary file.')\ntf.app.flags.DEFINE_float('lr', 0.15, 'learning rate')\ntf.app.flags.DEFINE_float('adagrad_init_acc', 0.1, 'initial accumulator value for Adagrad')\ntf.app.flags.DEFINE_float('rand_unif_init_mag', 0.02, 'magnitude for lstm cells random uniform inititalization')\ntf.app.flags.DEFINE_float('trunc_norm_init_std', 1e-4, 'std of trunc norm init, used for initializing everything else')\ntf.app.flags.DEFINE_float('max_grad_norm', 2.0, 'for gradient clipping')\n# Pointer-generator or baseline model\ntf.app.flags.DEFINE_boolean('pointer_gen', True, 'If True, use pointer-generator model. If False, use baseline model.')\n# Coverage hyperparameters\ntf.app.flags.DEFINE_boolean('use_coverage', False,\n 'Use coverage mechanism. Note, the experiments reported in the ACL paper train WITHOUT coverage until converged, and then train for a short phase WITH coverage afterwards. i.e. to reproduce the results in the ACL paper, turn this off for most of training then turn on for a short phase at the end.')\ntf.app.flags.DEFINE_float('cov_loss_wt', 1.0,\n 'Weight of coverage loss (lambda in the paper). If zero, then no incentive to minimize coverage loss.')\n# Utility flags, for restoring and changing checkpoints\ntf.app.flags.DEFINE_boolean('convert_to_coverage_model', False,\n 'Convert a non-coverage model to a coverage model. Turn this on and run in train mode. Your current training model will be copied to a new version (same name with _cov_init appended) that will be ready to run with coverage flag turned on, for the coverage training stage.')\ntf.app.flags.DEFINE_boolean('restore_best_model', False,\n 'Restore the best model in the eval/ dir and save it in the train/ dir, ready to be used for further training. Useful for early stopping, or if your training checkpoint has become corrupted with e.g. NaN values.')\n# Debugging. 
See https://www.tensorflow.org/programmers_guide/debugger\ntf.app.flags.DEFINE_boolean('debug', False, \"Run in tensorflow's debug mode (watches for NaN/inf values)\")\n\n\ndef get_hps():\n hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm',\n 'vocab_size',\n 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'use_coverage',\n 'cov_loss_wt',\n 'pointer_gen']\n hps_dict = {}\n for key, val in tf.app.flags.FLAGS.__flags.items(): # for each flag\n if key in hparam_list: # if it's in the list\n hps_dict[key] = val.value # add it to the dict\n hps = namedtuple(\"HParams\", hps_dict.keys())(**hps_dict)\n return hps, hps_dict\n\n\nif __name__ == '__main__':\n main()\n pass\n","repo_name":"lyutyuh/pointer-generator_keras","sub_path":"PointerGenerator.py","file_name":"PointerGenerator.py","file_ext":"py","file_size_in_byte":36486,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"30173877440","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom .views import MainHandler, ProcessHandler, DashboardHandler\nfrom .views import ProcessWebSocket\n\n\nINCLUDE_URLS = [\n (r\"/process/(?P<slug>[\\w-]+).ws\", ProcessWebSocket),\n (r\"/process/(?P<slug>[\\w-]+).json\", ProcessHandler),\n (r\"/dashboard/(?P<slug>[\\w-]+)\", DashboardHandler),\n (r\"/\", MainHandler),\n]\n","repo_name":"yupbank/mining","sub_path":"mining/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"72174906505","text":"import functools\nimport warnings\n\n\nclass jupyterlab_deprecation(Warning): # noqa\n \"\"\"Create our own deprecation class, since Python >= 2.7\n silences deprecations by default.\n \"\"\"\n\n pass\n\n\nclass deprecated: # noqa\n \"\"\"Decorator to mark deprecated functions with warning.\n Adapted from `scikit-image/skimage/_shared/utils.py`.\n\n Parameters\n ----------\n alt_func : str\n If given, tell user what function to use instead.\n behavior : {'warn', 'raise'}\n Behavior during call to deprecated function: 'warn' = warn user that\n function is deprecated; 'raise' = raise error.\n removed_version : str\n The package version in which the deprecated function will be removed.\n \"\"\"\n\n def __init__(self, alt_func=None, behavior=\"warn\", removed_version=None):\n self.alt_func = alt_func\n self.behavior = behavior\n self.removed_version = removed_version\n\n def __call__(self, func):\n alt_msg = \"\"\n if self.alt_func is not None:\n alt_msg = \" Use ``%s`` instead.\" % self.alt_func\n rmv_msg = \"\"\n if self.removed_version is not None:\n rmv_msg = \" and will be removed in version %s\" % self.removed_version\n\n msg = \"Function ``%s`` is deprecated\" % func.__name__ + rmv_msg + \".\" + alt_msg\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n if self.behavior == \"warn\":\n func_code = func.__code__\n warnings.simplefilter(\"always\", jupyterlab_deprecation)\n warnings.warn_explicit(\n msg,\n category=jupyterlab_deprecation,\n filename=func_code.co_filename,\n lineno=func_code.co_firstlineno + 1,\n )\n elif self.behavior == \"raise\":\n raise jupyterlab_deprecation(msg)\n return func(*args, **kwargs)\n\n # modify doc string to display deprecation warning\n doc = \"**Deprecated function**.\" + alt_msg\n if wrapped.__doc__ is None:\n wrapped.__doc__ = doc\n else:\n wrapped.__doc__ = doc + \"\\n\\n \" + wrapped.__doc__\n\n return 
wrapped\n","repo_name":"jupyterlab/jupyterlab","sub_path":"jupyterlab/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":13417,"dataset":"github-code","pt":"81"}
{"seq_id":"39430409629","text":"# storage for the data received from the user\ndata_store = []\n\n\n# check whether any data exists (returns True when there is data)\ndef data_is_empty():\n if not data_store:\n print(\"There is no data stored at the moment.\")\n return False\n\n return True\n\n '''\n # exactly the same as the code above\n # 'not' returns True when there is no data and False when there is\n if len(data_store) == 0:\n print(\"There is no data stored at the moment.\")\n return False\n \n return True\n '''\n\n\n# validate an index against the stored data; on an out-of-range input,\n# ask the user to enter a smaller number\ndef data_index_checked(input_index):\n last_data_index = len(data_store) - 1\n\n if input_index > last_data_index:\n print(\"Please enter {} or less.\".format(last_data_index))\n return False\n\n return True\n\n\n# help\ndef help_list():\n print(\"add : add data\")\n print(\"read : view data\")\n print(\"update : edit data\")\n print(\"delete : delete data\")\n\n\n# add data\ndef do_add():\n input_data = input(\"Enter a value to store : \")\n print(\"{} has been stored.\".format(input_data))\n data_store.append(input_data)\n\n\n# list the stored entries\ndef show_list():\n if data_is_empty():\n print(\"Currently stored values : {}\".format(data_store))\n\n\n# edit an entry\ndef do_update():\n if not data_is_empty():\n return\n\n print(data_store)\n input_index = int(input(\"Select an item to edit : \"))\n\n if not data_index_checked(input_index):\n return\n\n input_update_data = input(\"What value should it be changed to : \")\n data_store[input_index] = input_update_data\n print(\"The value has been changed to {}.\".format(input_update_data))\n\n\n# delete an entry\ndef do_delete():\n if not data_is_empty():\n return\n\n print(data_store)\n input_index = int(input(\"Select an item to delete : \"))\n\n if not data_index_checked(input_index):\n return\n\n print(\"The value {} has been deleted.\".format(data_store[input_index]))\n del data_store[input_index]\n\n\ndef App():\n cmd = input(\"Please enter a command : \")\n\n if cmd == \"help\":\n help_list()\n\n elif cmd == \"add\":\n do_add()\n\n elif cmd == \"read\":\n show_list()\n\n elif cmd == \"update\":\n do_update()\n\n elif cmd == \"delete\":\n do_delete()\n\n elif cmd == \"exit\":\n print(\"The program has exited.\")\n exit()\n\n else:\n print(\"That is not a valid command.\")\n\n\nwhile True:\n App()\n","repo_name":"SangWon7242/python_text_board_list_ver","sub_path":"Exam0806.py","file_name":"Exam0806.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"37237876614","text":"from molmaps import distances, calculator, summary\nimport pandas as pd\nimport numpy as np\nfrom rdkit import Chem\nfrom tqdm import tqdm\ntqdm.pandas(ascii=True)\n\n\n\n\ndef caldis(data, idx, tag, methods = ['correlation', 'cosine', 'jaccard']):\n \n \n ##############################################################\n # NOTE: 'feature' and 'loadnpy' are not imported above; they are assumed\n # to be provided elsewhere in the surrounding project.\n Nf = len(feature.fingerprint.Extraction().bitsinfo)\n data0 = loadnpy('./data/fingerprint_8206960.npy', N = Nf, dtype = np.bool)\n groups = data0.sum(axis=1)\n from sklearn.model_selection import GroupKFold\n G = GroupKFold(n_splits=10)\n sp = G.split(X = data0, groups=groups)\n spl = list(sp)\n sidx = spl[0][1]\n del data0\n print(len(sidx))\n \n data = data[sidx]\n data = data.astype(np.float32,copy=False)\n #############################################################\n \n for method in methods:\n res = calculator.pairwise_distance(data, n_cpus=16, method=method)\n res = np.nan_to_num(res,copy=False)\n df = pd.DataFrame(res,index=idx,columns=idx)\n df = df.astype('float32')\n df.to_pickle('./data/%s_%s.cfg' % (tag, 
method), compression = 'gzip')\n\n\n\n\nif __name__ == '__main__':\n \n # descriptors distance\n dfx = pd.read_csv('./molecule_open_data/candidate_train.csv')\n dfx = dfx.set_index('id')\n\n data = dfx.values\n idx = dfx.columns\n \n tag = 'feature'\n caldis(data, idx, tag, methods = ['correlation', 'cosine'])\n","repo_name":"shenwanxiang/biendata","sub_path":"molmaps/config/00_distances_calculation.py","file_name":"00_distances_calculation.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"72489629066","text":"#!/usr/bin/python3\n\"\"\" 4-main MRU Cache \"\"\"\n\nBaseCaching = __import__('base_caching').BaseCaching\n\n\nclass MRUCache(BaseCaching):\n \"\"\"\n Create a class MRUCache that inherits from BaseCaching\n and is a caching system:\n \"\"\"\n\n def __init__(self):\n \"\"\" Init instance BaseCaching \"\"\"\n super().__init__()\n self.usage_order = [] # keys from least to most recently used\n\n def put(self, key, item):\n \"\"\"\n Must assign to the dictionary self.cache_data the\n item value for the key key.\n \"\"\"\n if key is None or item is None:\n return\n if key in self.cache_data:\n self.usage_order.remove(key)\n elif len(self.cache_data) >= self.MAX_ITEMS:\n # MRU policy: discard the most recently used key\n to_remove = self.usage_order.pop()\n del self.cache_data[to_remove]\n print(\"DISCARD: {}\".format(to_remove))\n self.cache_data[key] = item\n self.usage_order.append(key)\n\n def get(self, key):\n \"\"\"\n Must return the value in self.cache_data linked to key.\n \"\"\"\n if key is None or key not in self.cache_data:\n return None\n # a read also counts as a use\n self.usage_order.remove(key)\n self.usage_order.append(key)\n return self.cache_data[key]\n","repo_name":"Diego-Guarise/holbertonschool-backend","sub_path":"0x01-caching/4-mru_cache.py","file_name":"4-mru_cache.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"12473311594","text":"from functools import lru_cache\nfrom typing import Union\n\n\n@lru_cache\ndef soma(\n valor_1: Union[str, int, float], valor_2: Union[str, int, float]\n) -> Union[str, int, float]:\n \"\"\"Sum two values.\n :param - valor_1: first value\n - valor_2: second value\n :return - Sum of valor_1 and valor_2 only\n \"\"\"\n soma_dois_valores = valor_1 + valor_2\n return soma_dois_valores\n\n\nSOMA_DOIS_NUMEROS = soma(1, 2.0524899927999035)\nSOMA_DUAS_STRINGS = soma(\"Mar\", \"ia\")\n\nprint(SOMA_DOIS_NUMEROS)\nprint(SOMA_DUAS_STRINGS)\n","repo_name":"joscelino/Preparacao_Ambiente_Python","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"41910261273","text":"from contextlib import asynccontextmanager\nfrom fastapi import FastAPI\n\nfrom app.config import config\nfrom app.services.database import sessionmanager\n\n\ndef init_app(init_db=True):\n lifespan = None\n\n if init_db:\n sessionmanager.init(config.DB_CONFIG)\n\n @asynccontextmanager\n async def lifespan(app: FastAPI):\n yield\n if sessionmanager._engine is not None:\n await sessionmanager.close()\n\n server = FastAPI(title=\"FastAPI server\", lifespan=lifespan)\n\n from app.views.user import router as user_router\n\n server.include_router(user_router, prefix=\"/api\", 
tags=[\"user\"])\n\n return server\n","repo_name":"gpkc/fastapi-sqlalchemy-pytest","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"17327362719","text":"import numpy as np\n\nfrom copy import deepcopy\n\n \ndef align_canonical_forms(parameters_1, parameters_2, inplace=False):\n \"\"\"Align domains and corresponding attributes of canonical parameter dictionary\n Args :\n parameters_1 (dict) :\n First dictionary containing canonical parameters\n parameters_2 (dict) :\n Second dictionary containing canonical parameters\n inplace (bool) :\n Boolean value indicating whether alignment should happen inplace\n Take note that both dictionaries can be altered\n Return:\n list :\n List containing two parameter dictionaries\n \"\"\"\n\n from pypgm.factor import vector_normal\n \n # Create copy of input parameters if necessary\n if not(inplace):\n parameters_1 = deepcopy(parameters_1)\n parameters_2 = deepcopy(parameters_2)\n\n # Obtain index representation of variables\n vars_1 = np.array(parameters_1['vars'])\n vars_2 = np.array(parameters_2['vars'])\n full_vars = np.unique(np.append(vars_1, vars_2))\n full_dim = len(full_vars)\n var_ind_1 = np.searchsorted(full_vars, vars_1)\n var_ind_2 = np.searchsorted(full_vars, vars_2)\n\n # Align wmean vectors\n wmean_1 = np.zeros(full_dim)\n wmean_2 = np.zeros(full_dim)\n wmean_1[var_ind_1] = parameters_1['wmean']\n wmean_2[var_ind_2] = parameters_2['wmean']\n parameters_1['wmean'] = wmean_1.tolist()\n parameters_2['wmean'] = wmean_2.tolist()\n\n # Obtain covariance form of parameters\n cov_form = parameters_1['cov_form']\n\n # Align precision based on covariance form of parameters \n if cov_form == vector_normal.COVARIANCE_FORM.COMMON:\n pass\n \n elif cov_form == vector_normal.COVARIANCE_FORM.DIAGONAL:\n prec_1 = np.zeros(full_dim)\n prec_2 = np.zeros(full_dim)\n prec_1[var_ind_1] = parameters_1['prec']\n prec_2[var_ind_2] = parameters_2['prec']\n parameters_1['prec'] = prec_1.tolist()\n parameters_2['prec'] = prec_2.tolist()\n \n elif cov_form == vector_normal.COVARIANCE_FORM.FULL:\n prec_1 = np.zeros((full_dim,full_dim))\n prec_2 = np.zeros((full_dim,full_dim))\n prec_1[np.ix_(var_ind_1,var_ind_1)] = parameters_1['prec']\n prec_2[np.ix_(var_ind_2,var_ind_2)] = parameters_2['prec']\n parameters_1['prec'] = prec_1.tolist()\n parameters_2['prec'] = prec_2.tolist()\n\n else:\n raise Exception('Parameter dictionary has invalid covariance form attribute')\n\n # Update variables attribute\n parameters_1['vars'] = full_vars.tolist()\n parameters_2['vars'] = full_vars.tolist()\n\n return [parameters_1, parameters_2]\n\n\ndef multiply_canonical_forms(parameters_1, parameters_2, inplace=False):\n \"\"\"Multiply two canonical parameter dictionaries\n Args :\n parameters_1 (dict) :\n First dictionary containing canonical parameters\n parameters_2 (dict) :\n Second dictionary containing canonical parameters\n inplace (bool) :\n Boolean value indicating whether multiplication should happen inplace\n Take note that both dictionaries can be altered\n Returns :\n (dict) :\n Dictionary containing canonical parameters of product\n \"\"\"\n\n # Create copy of input parameters if necessary\n if not(inplace):\n parameters_1 = deepcopy(parameters_1)\n parameters_2 = deepcopy(parameters_2)\n\n # Ensure parameters are aligned\n align_canonical_forms(parameters_1, parameters_2, inplace=True)\n\n # Add wmean attributes\n 
parameters_1['wmean'] = np.array(parameters_1['wmean']) + np.array(parameters_2['wmean'])\n parameters_1['wmean'] = parameters_1['wmean'].tolist()\n\n # Add precision attributes\n parameters_1['prec'] = np.array(parameters_1['prec']) + np.array(parameters_2['prec'])\n parameters_1['prec'] = parameters_1['prec'].tolist()\n\n return parameters_1\n\n\ndef inpl_multiply_canonical_forms(parameters_1, parameters_2):\n return multiply_canonical_forms(parameters_1, parameters_2, inplace=True)\n\n\ndef divide_canonical_forms(parameters_1, parameters_2, inplace=False):\n \"\"\"Divide two canonical parameter dictionaries\n Args :\n parameters_1 (dict) :\n First dictionary containing canonical parameters\n parameters_2 (dict) :\n Second dictionary containing canonical parameters\n inplace (bool) :\n Boolean value indicating whether division should happen inplace\n Take note that both dictionaries can be altered\n Returns :\n (dict) :\n Dictionary containing canonical parameters of quotient\n \"\"\"\n\n # Create copy of input parameters if necessary\n if not(inplace):\n parameters_1 = deepcopy(parameters_1)\n parameters_2 = deepcopy(parameters_2)\n\n # Ensure parameters are aligned\n align_canonical_forms(parameters_1, parameters_2, inplace=True)\n\n # Subtract wmean attributes\n parameters_1['wmean'] = np.array(parameters_1['wmean']) - np.array(parameters_2['wmean'])\n parameters_1['wmean'] = parameters_1['wmean'].tolist()\n\n # Subtract precision attributes\n parameters_1['prec'] = np.array(parameters_1['prec']) - np.array(parameters_2['prec'])\n parameters_1['prec'] = parameters_1['prec'].tolist()\n\n return parameters_1\n\n\ndef inpl_divide_canonical_forms(parameters_1, parameters_2):\n return divide_canonical_forms(parameters_1, parameters_2, inplace=True)\n\n\ndef marginalize_canonical_form(parameters, domain, inplace=False):\n \"\"\"Marginalize canonical parameters to subset of the domain\n Args :\n parameters (dict) :\n Dictionary containing canonical parameters\n domain (list) :\n List of variables to marginalize to\n inplace (bool) :\n Boolean value indicating whether marginalization should happen inplace\n Returns :\n (dict) :\n Dictionary containing canonical parameters of marginal\n \"\"\"\n\n from pypgm.factor import vector_normal\n\n # Create copy of input parameters if necessary\n if not(inplace):\n parameters = deepcopy(parameters)\n \n # Covert canonical form to standard form\n vector_normal.canonical_to_standard(parameters, inplace=True)\n\n # Marginalize standard form\n vector_normal.marginalize_standard_form(parameters, domain, inplace=True)\n\n # Covert standard form to canonical form\n vector_normal.standard_to_canonical(parameters, inplace=True)\n\n return parameters\n\n\ndef inpl_marginalize_canonical_form(parameters, domain):\n return marginalize_canonical_form(parameters, domain, inplace=True)\n","repo_name":"djpbadenhorst/pers-phd","sub_path":"src/pypgm/factor/vector_normal/canonical.py","file_name":"canonical.py","file_ext":"py","file_size_in_byte":6625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3612865856","text":"import os\nimport sys\nimport warnings\nfrom functools import partial\n\nfrom bayes_opt import BayesianOptimization\nfrom keras import optimizers\nfrom keras_contrib.losses import crf_loss\nfrom keras_contrib.metrics import crf_accuracy\n\n# from keras import losses\n\nTHIS_DIR = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.dirname(THIS_DIR))\nfrom model.bilstm_crf import 
BLSTMCRF\nfrom model.config import Config\nfrom model.data_utils import TrainDevData\n\nverbose = 1\n\nwarnings.filterwarnings('ignore')\n\n\ndef get_model():\n model = BLSTMCRF(Config())\n model.build()\n return model\n\n\ndef fit_with(verbose, dropout2_rate, lr, epoch_drop, lr_decay):\n # Create the model using a specified hyper parameters.\n print(\"==================================================>\")\n print(dropout2_rate, lr, epoch_drop, lr_decay)\n print(\"<==================================================\")\n model = get_model()\n model.config.dropout = dropout2_rate\n model.config.lr = lr\n model.config.epoch_drop = epoch_drop\n model.config.lr_decay = lr_decay\n\n # Train the model for a specified number of epochs.\n optimizer = optimizers.Adam(lr=lr)\n\n model.compile(loss=crf_loss,\n optimizer=optimizer,\n metrics=[crf_accuracy])\n\n # create data-sets\n data = TrainDevData(\n model.config.filename_train,\n model.config.processing_word,\n model.config.processing_tag,\n model.config.max_iter,\n model.config.train_validate_split\n )\n # train model\n history = model.train(data.train, data.validate)\n accuracy = max(history.history['val_crf_accuracy'])\n return accuracy\n\n\nfit_with_partial = partial(fit_with, verbose)\n\n# Bounded region of parameter space\npbounds = {'dropout2_rate': (0.4, 0.6), 'lr': (1e-3, 1e-2), \"epoch_drop\": (1, 4), \"lr_decay\": (0.8, 0.9)}\n\nb_optimizer = BayesianOptimization(\n f=fit_with_partial,\n pbounds=pbounds,\n verbose=2, # verbose = 1 prints only when a maximum is observed, verbose = 0 is silent\n random_state=1,\n)\n\nb_optimizer.maximize(init_points=10, n_iter=10)\n\nfor i, res in enumerate(b_optimizer.res):\n print(\"Iteration {}: \\n\\t{}\".format(i, res))\n\nprint(b_optimizer.max)\n","repo_name":"dpakpdl/NepaliNLP","sub_path":"Optimization/bayesian_optimizer.py","file_name":"bayesian_optimizer.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72176811146","text":"import torch\nfrom torch import optim as optim\nfrom torch.optim import Optimizer\n\nclass Optimizers(object):\n \"\"\"Handles a list of optimizers.\"\"\"\n\n def __init__(self, *ops):\n self.optimizers = ops\n\n def zero_grad(self):\n for op in self.optimizers:\n op.zero_grad()\n\n def step(self):\n \"\"\"Makes all optimizers update their params.\"\"\"\n for optimizer in self.optimizers:\n optimizer.step()\n\ndef build_optimizer(config, model):\n \"\"\"\n Build optimizer, set weight decay of normalization to 0 by default.\n \"\"\"\n skip = {}\n skip_keywords = {}\n if hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n if hasattr(model, 'no_weight_decay_keywords'):\n skip_keywords = model.no_weight_decay_keywords()\n parameters = set_weight_decay(model, skip, skip_keywords)\n\n opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()\n optimizer = None\n if opt_lower == 'sgd':\n optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n elif opt_lower == \"adamax\":\n optimizer = AdamaxW(parameters, lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n\n return optimizer\n\n\ndef set_weight_decay(model, skip_list=(), skip_keywords=()):\n has_decay = 
[]\n no_decay = []\n\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\") or (name in skip_list) or \\\n check_keywords_in_name(name, skip_keywords):\n no_decay.append(param)\n # print(f\"{name} has no weight decay\")\n else:\n has_decay.append(param)\n return [{'params': has_decay},\n {'params': no_decay, 'weight_decay': 0.}]\n\n\ndef check_keywords_in_name(name, keywords=()):\n isin = False\n for keyword in keywords:\n if keyword in name:\n isin = True\n return isin\n\nclass AdamaxW(Optimizer):\n r\"\"\"Implements AdamaxW algorithm.\n The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.\n The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay coefficient (default: 1e-2)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n .. _Adamax\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _Decoupled Weight Decay Regularization:\n https://arxiv.org/abs/1711.05101\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n # Learning rate/Weight decay are different for Swin\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=1e-2, amsgrad=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, amsgrad=amsgrad)\n super(AdamaxW, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(AdamaxW, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n\n # Perform step-weight decay\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\n\n # Perform optimization step\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n eps = group['eps']\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of inf gradient values\n state['exp_inf'] = torch.zeros_like(p.data)\n\n exp_avg, exp_inf = 
state['exp_avg'], state['exp_inf']\n beta1, beta2 = group['betas']\n state['step'] += 1\n # Decay the first and inf moment running average coefficient\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n\n norm_buf = torch.cat([\n exp_inf.mul_(beta2).unsqueeze(0),\n grad.abs().add_(eps).unsqueeze_(0)\n ], 0)\n torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))\n\n bias_correction = 1 - beta1 ** state['step']\n clr = group['lr'] / bias_correction\n p.data.addcdiv_(exp_avg, exp_inf, value=-clr)\n\n return loss","repo_name":"IemProg/MiMi","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"}
{"seq_id":"9645933301","text":"# -*- coding:utf-8 -*-\n#Author:lyc\nimport os, sys\n\nwords=[\n'look','into','my','eyes','look','into','my','eyes',\n'the','eyes','the','eyes','the','eyes','not','around','the',\n'eyes',\"don't\",'look','around','the','eyes','look','into',\n'my','eyes',\"you're\",'under'\n]\nfrom collections import Counter\nword_counts=Counter(words)\n# the 3 most frequent words\ntop_three=word_counts.most_common(3)\nprint(top_three)\n# Outputs [('eyes',8),('the',5),('look',4)]\n\n\n# Counter results can be added and subtracted\n# (morewords reconstructed from the interactive output below)\nmorewords=['why','are','you','not','looking','in','my','eyes']\na=Counter(words)\nb=Counter(morewords)\nprint(a)\n# Counter({'eyes':8,'the':5,'look':4,'into':3,'my':3,'around':2,\n# \"you're\":1,\"don't\":1,'under':1,'not':1})\nprint(b)\n# Counter({'eyes':1,'looking':1,'are':1,'in':1,'not':1,'you':1,\n# 'my':1,'why':1})\n# Combine counts\nc=a+b\nprint(c)\n# Counter({'eyes':9,'the':5,'look':4,'my':4,'into':3,'not':2,\n# 'around':2,\"you're\":1,\"don't\":1,'in':1,'why':1,\n# 'looking':1,'are':1,'under':1,'you':1})\n","repo_name":"g-lyc/PRACTICE","sub_path":"cookbook/数据结构处理/统计出现次数.py","file_name":"统计出现次数.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"31409851656","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n==================================================\n@Project -> File :MxOnline -> YunPian\n@IDE :PyCharm\n@Author :jhong.tao\n@Date :2021/6/25 8:38\n@Desc :Send an SMS verification code\n==================================================\n\"\"\"\nimport requests\nimport json\n\n\ndef send_single_sms(apikey, code, mobile):\n # send a single SMS via the Yunpian API\n url = 'https://sms.yunpian.com/v2/sms/single_send.json'\n # the bracketed prefix is the SMS signature registered with Yunpian;\n # the template reads 'Your verification code is {}'\n text = '【蜗犇AI】您的验证码是{}'.format(code)\n res = requests.post(url, data={\n 'apikey': apikey,\n 'mobile': mobile,\n 'text': text\n })\n re_json = json.loads(res.text)\n return re_json\n\n\nif __name__ == '__main__':\n res = send_single_sms('e3a4560f4769ffef24077aa73ec8498f', '1234','18468120158')\n # send_single_sms already returns the parsed JSON dict\n code = res['code']\n msg = res['msg']\n if code == 0:\n print('Send succeeded')\n else:\n print('Send failed: {}'.format(msg))","repo_name":"jhong-tao/django2eduweb","sub_path":"MxOnline/apps/utils/YunPian.py","file_name":"YunPian.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"39471730624","text":"#!/usr/bin/env python3.5\n\nstr_desc = \"\"\"\n __ _ _\n / _(_) | |\n _ __ | |_ _ ___ | |__\n| '_ \\| _| |/ _ \\| '_ \\\\\n| |_) | | | | (_) | | | |\n| .__/|_| |_|\\___/|_| |_|\n| |\n|_|\n\n\n\n A simple http file IO handler\n\n `pfioh' is a simple http-based file I/O handler/server allowing software\n agents to perform useful file transfers over http.\n\n `pfioh' handles HTTP REST-like requests on a given port -- it can 
accept\n incoming file data from a client, and can also return server-side file trees\n to a client.\n\n `pfioh' can also zip up/unzip file trees so that entire paths can be easily\n transferred.\n\n\"\"\"\n\nimport os\nimport sys\n\nfrom io import BytesIO as IO\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom socketserver import ThreadingMixIn\nimport socket\nimport argparse\nimport cgi\nimport zipfile\nimport json\nimport base64\nimport zipfile\nimport uuid\nimport urllib\nimport ast\nimport shutil\nimport datetime\n\n# pman local dependencies\nfrom ._colors import Colors\nfrom .debug import debug\n\nclass StoreHandler(BaseHTTPRequestHandler):\n\n b_quiet = False\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n \"\"\"\n BaseHTTPRequestHandler.__init__(self, *args, **kwargs)\n\n def qprint(self, msg, **kwargs):\n\n str_comms = \"\"\n for k,v in kwargs.items():\n if k == 'comms': str_comms = v\n\n if not StoreHandler.b_quiet:\n if str_comms == 'status': print(Colors.PURPLE, end=\"\")\n if str_comms == 'error': print(Colors.RED, end=\"\")\n if str_comms == \"tx\": print(Colors.YELLOW + \"<----\")\n if str_comms == \"rx\": print(Colors.GREEN + \"---->\")\n print('%s' % datetime.datetime.now() + \" | \", end=\"\")\n print(msg)\n if str_comms == \"tx\": print(Colors.YELLOW + \"<----\")\n if str_comms == \"rx\": print(Colors.GREEN + \"---->\")\n print(Colors.NO_COLOUR, end=\"\")\n\n def do_GET_remoteStatus(self, d_msg, **kwargs):\n \"\"\"\n This method is used to get information about the remote\n server -- for example, is a remote directory/file valid?\n \"\"\"\n d_meta = d_msg['meta']\n d_remote = d_meta['remote']\n\n str_serverPath = d_remote['path']\n\n b_isFile = os.path.isfile(str_serverPath)\n b_isDir = os.path.isdir(str_serverPath)\n b_exists = os.path.exists(str_serverPath)\n\n d_ret = {\n 'status': b_exists,\n 'isfile': b_isFile,\n 'isdir': b_isDir\n }\n\n\n self.ret_client(d_ret)\n\n self.qprint(d_ret, comms = 'tx')\n\n return {'status': b_exists}\n\n def do_GET_withCompression(self, d_msg):\n \"\"\"\n Process a \"GET\" using zip/base64 encoding\n\n :return:\n \"\"\"\n\n # d_msg = ast.literal_eval(d_server)\n d_meta = d_msg['meta']\n d_local = d_meta['local']\n d_remote = d_meta['remote']\n d_transport = d_meta['transport']\n d_compress = d_transport['compress']\n d_ret = {}\n\n str_serverPath = d_remote['path']\n str_fileToProcess = str_serverPath\n\n b_cleanup = False\n b_zip = True\n\n str_encoding = 'base64'\n\n if 'cleanup' in d_compress: b_cleanup = d_compress['cleanup']\n\n str_archive = d_compress['archive']\n if str_archive == 'zip': b_zip = True\n else: b_zip = False\n if os.path.isdir(str_serverPath):\n b_zip = True\n str_archive = 'zip'\n\n # If specified (or if the target is a directory), create zip archive\n # of the local path\n if b_zip:\n self.qprint(\"Zipping target '%s'...\" % str_serverPath, comms = 'status')\n d_fio = zip_process(\n action = 'zip',\n path = str_serverPath,\n arcroot = str_serverPath\n )\n d_ret['zip'] = d_fio\n d_ret['status'] = d_fio['status']\n d_ret['msg'] = d_fio['msg']\n d_ret['timestamp'] = '%s' % datetime.datetime.now()\n if not d_ret['status']:\n self.qprint(\"An error occurred during the zip operation:\\n%s\" % d_ret['stdout'],\n comms = 'error')\n self.ret_client(d_ret)\n return d_ret\n\n str_fileToProcess = d_fio['fileProcessed']\n str_zipFile = str_fileToProcess\n d_ret['zip']['filesize'] = '%s' % os.stat(str_fileToProcess).st_size\n self.qprint(\"Zip file: \" + Colors.YELLOW + \"%s\" % str_zipFile +\n 
Colors.PURPLE + '...' , comms = 'status')\n\n # Encode possible binary filedata in base64 suitable for text-only\n # transmission.\n if 'encoding' in d_compress: str_encoding = d_compress['encoding']\n if str_encoding == 'base64':\n self.qprint(\"base64 encoding target '%s'...\" % str_fileToProcess,\n comms = 'status')\n d_fio = base64_process(\n action = 'encode',\n payloadFile = str_fileToProcess,\n saveToFile = str_fileToProcess + \".b64\"\n )\n d_ret['encode'] = d_fio\n d_ret['status'] = d_fio['status']\n d_ret['msg'] = d_fio['msg']\n d_ret['timestamp'] = '%s' % datetime.datetime.now()\n str_fileToProcess = d_fio['fileProcessed']\n d_ret['encoding'] = {}\n d_ret['encoding']['filesize'] = '%s' % os.stat(str_fileToProcess).st_size\n str_base64File = str_fileToProcess\n\n with open(str_fileToProcess, 'rb') as fh:\n filesize = os.stat(str_fileToProcess).st_size\n self.qprint(\"Transmitting \" + Colors.YELLOW + \"{:,}\".format(filesize) + Colors.PURPLE +\n \" target bytes from \" + Colors.YELLOW +\n \"%s\" % (str_fileToProcess) + Colors.PURPLE + '...', comms = 'status')\n self.send_response(200)\n # self.send_header('Content-type', 'text/json')\n self.end_headers()\n # try:\n # self.wfile.write(fh.read().encode())\n # except:\n self.qprint('<transmission>', comms = 'tx')\n d_ret['transmit'] = {}\n d_ret['transmit']['msg'] = 'transmitting'\n d_ret['transmit']['timestamp'] = '%s' % datetime.datetime.now()\n d_ret['transmit']['filesize'] = '%s' % os.stat(str_fileToProcess).st_size\n d_ret['status'] = True\n d_ret['msg'] = d_ret['transmit']['msg']\n self.wfile.write(fh.read())\n\n if b_cleanup:\n if b_zip:\n self.qprint(\"Removing '%s'...\" % (str_zipFile), comms = 'status')\n if os.path.isfile(str_zipFile): os.remove(str_zipFile)\n if str_encoding == 'base64':\n self.qprint(\"Removing '%s'...\" % (str_base64File), comms = 'status')\n if os.path.isfile(str_base64File): os.remove(str_base64File)\n\n\n self.ret_client(d_ret)\n self.qprint(d_ret, comms = 'tx')\n\n return d_ret\n\n def do_GET_withCopy(self, d_msg):\n \"\"\"\n Process a \"GET\" using copy operations\n\n :return:\n \"\"\"\n\n d_meta = d_msg['meta']\n d_local = d_meta['local']\n d_remote = d_meta['remote']\n d_transport = d_meta['transport']\n d_copy = d_transport['copy']\n\n str_serverPath = d_remote['path']\n str_clientPath = d_local['path']\n str_fileToProcess = str_serverPath\n\n b_copyTree = False\n b_copyFile = False\n\n d_ret = {}\n d_ret['status'] = True\n\n if not d_copy['symlink']:\n if os.path.isdir(str_serverPath):\n b_copyTree = True\n str_serverNode = str_serverPath.split('/')[-1]\n try:\n shutil.copytree(str_serverPath, os.path.join(str_clientPath, str_serverNode))\n except BaseException as e:\n d_ret['status'] = False\n d_ret['msg'] = str(e)\n if os.path.isfile(str_serverPath):\n b_copyFile = True\n shutil.copy2(str_serverPath, str_clientPath)\n if d_copy['symlink']:\n str_serverNode = str_serverPath.split('/')[-1]\n try:\n os.symlink(str_serverPath, os.path.join(str_clientPath, str_serverNode))\n b_symlink = True\n except BaseException as e:\n d_ret['status'] = False\n d_ret['msg'] = str(e)\n b_symlink = False\n\n d_ret['source'] = str_serverPath\n d_ret['destination'] = str_clientPath\n d_ret['copytree'] = b_copyTree\n d_ret['copyfile'] = b_copyFile\n d_ret['symlink'] = b_symlink\n d_ret['timestamp'] = '%s' % datetime.datetime.now()\n\n self.ret_client(d_ret)\n\n return d_ret\n\n def log_message(self, format, *args):\n \"\"\"\n This silences the server from spewing to stdout!\n \"\"\"\n return\n\n def 
do_GET(self):\n\n d_server = dict(urllib.parse.parse_qsl(urllib.parse.urlsplit(self.path).query))\n d_meta = ast.literal_eval(d_server['meta'])\n\n d_msg = {}\n d_msg['action'] = d_server['action']\n d_msg['meta'] = d_meta\n d_transport = d_meta['transport']\n\n self.qprint(self.path, comms = 'rx')\n\n if 'checkRemote' in d_transport and d_transport['checkRemote']:\n self.qprint('Getting status on server filesystem...', comms = 'status')\n d_ret = self.do_GET_remoteStatus(d_msg)\n return d_ret\n\n if 'compress' in d_transport:\n d_ret = self.do_GET_withCompression(d_msg)\n return d_ret\n\n if 'copy' in d_transport:\n d_ret = self.do_GET_withCopy(d_msg)\n return d_ret\n\n def form_get(self, str_verb, data):\n \"\"\"\n Returns a form from cgi.FieldStorage\n \"\"\"\n return cgi.FieldStorage(\n IO(data),\n headers = self.headers,\n environ =\n {\n 'REQUEST_METHOD': str_verb,\n 'CONTENT_TYPE': self.headers['Content-Type'],\n }\n )\n\n def do_POST(self):\n\n # Parse the form data posted\n\n self.qprint(str(self.headers), comms = 'rx')\n\n length = self.headers['content-length']\n data = self.rfile.read(int(length))\n form = self.form_get('POST', data)\n d_form = {}\n d_ret = {\n 'msg': 'In do_POST',\n 'status': True,\n 'formsize': sys.getsizeof(form)\n }\n\n for key in form:\n d_form[key] = form.getvalue(key)\n\n # d_msg = json.loads(ast.literal_eval(d_form['d_msg']))\n d_msg = json.loads((d_form['d_msg']))\n d_meta = d_msg['meta']\n\n if 'ctl' in d_meta:\n self.do_POST_serverctl(d_meta)\n\n if 'transport' in d_meta:\n d_transport = d_meta['transport']\n if 'compress' in d_transport:\n d_ret = self.do_POST_withCompression(\n data = data,\n length = length,\n form = form,\n d_form = d_form\n )\n if 'copy' in d_transport:\n d_ret = self.do_POST_withCopy(d_meta)\n\n self.ret_client(d_ret)\n return d_ret\n\n def do_POST_serverctl(self, d_meta):\n \"\"\"\n \"\"\"\n d_ctl = d_meta['ctl']\n self.qprint('Processing server ctl...', comms = 'status')\n self.qprint(d_meta, comms = 'rx')\n if 'serverCmd' in d_ctl:\n if d_ctl['serverCmd'] == 'quit':\n self.qprint('Shutting down server', comms = 'status')\n d_ret = {\n 'msg': 'Server shut down',\n 'status': True\n }\n self.qprint(d_ret, comms = 'tx')\n self.ret_client(d_ret)\n os._exit(0)\n\n def do_POST_withCopy(self, d_meta):\n \"\"\"\n Process a \"POST\" using copy operations\n\n :return:\n \"\"\"\n\n # d_meta = d_msg['meta']\n d_local = d_meta['local']\n d_remote = d_meta['remote']\n d_transport = d_meta['transport']\n d_copy = d_transport['copy']\n\n str_serverPath = d_remote['path']\n str_clientPath = d_local['path']\n str_fileToProcess = str_serverPath\n\n b_copyTree = False\n b_copyFile = False\n\n d_ret = {}\n d_ret['status'] = True\n\n if not d_copy['symlink']:\n if os.path.isdir(str_clientPath):\n b_copyTree = True\n str_clientNode = str_clientPath.split('/')[-1]\n try:\n shutil.copytree(str_clientPath, os.path.join(str_serverPath, str_clientNode))\n except BaseException as e:\n d_ret['status'] = False\n d_ret['msg'] = str(e)\n if os.path.isfile(str_clientPath):\n b_copyFile = True\n shutil.copy2(str_clientPath, str_serverPath)\n d_ret['copytree'] = b_copyTree\n d_ret['copyfile'] = b_copyFile\n if d_copy['symlink']:\n str_clientNode = str_clientPath.split('/')[-1]\n try:\n os.symlink(str_clientPath, os.path.join(str_serverPath, str_clientNode))\n except BaseException as e:\n d_ret['status'] = False\n d_ret['msg'] = str(e)\n d_ret['symlink'] = 'ln -s %s %s' % (str_clientPath, str_serverPath)\n\n # d_ret['d_meta'] = d_meta\n d_ret['source'] = 
str_clientPath\n d_ret['destination'] = str_serverPath\n d_ret['copytree'] = b_copyTree\n d_ret['copyfile'] = b_copyFile\n d_ret['timestamp'] = '%s' % datetime.datetime.now()\n\n # self.ret_client(d_ret)\n\n return d_ret\n\n def do_POST_withCompression(self, **kwargs):\n\n # Parse the form data posted\n\n self.qprint(str(self.headers), comms = 'rx')\n self.qprint('do_POST_withCompression()', comms = 'status')\n\n data = None\n length = 0\n form = None\n d_form = {}\n d_ret = {}\n\n for k,v in kwargs.items():\n if k == 'data': data = v\n if k == 'length': length = v\n if k == 'form': form = v\n if k == 'd_form': d_form = v\n\n\n d_msg = json.loads((d_form['d_msg']))\n d_meta = d_msg['meta']\n #\n # d_meta = json.loads(d_form['d_meta'])\n fileContent = d_form['local']\n str_fileName = d_meta['local']['path']\n str_encoding = d_form['encoding']\n\n d_remote = d_meta['remote']\n b_unpack = False\n # b_serverPath = False\n str_unpackBase = self.server.str_fileBase\n if 'path' in d_remote:\n str_unpackPath = d_remote['path']\n str_unpackBase = str_unpackPath + '/'\n\n d_transport = d_meta['transport']\n d_compress = d_transport['compress']\n if 'unpack' in d_compress:\n b_unpack = d_compress['unpack']\n\n str_fileOnly = os.path.split(str_fileName)[-1]\n str_fileSuffix = \"\"\n if d_compress['archive'] == \"zip\":\n str_fileSuffix = \".zip\"\n\n str_localFile = \"%s%s%s\" % (str_unpackBase, str_fileOnly, str_fileSuffix)\n\n if str_encoding == \"base64\":\n d_ret['decode'] = {}\n data = base64.b64decode(fileContent)\n try:\n with open(str_localFile, 'wb') as fh:\n fh.write(data)\n except:\n d_ret['decode']['status'] = False\n d_ret['decode']['msg'] = 'base64 decode unsuccessful!'\n\n self.ret_client(d_ret)\n self.qprint(d_ret, comms = 'tx')\n return d_ret\n else:\n d_ret['write'] = {}\n with open(str_localFile, 'wb') as fh:\n try:\n fh.write(fileContent.decode())\n d_ret['write']['decode'] = True\n except:\n fh.write(fileContent)\n d_ret['write']['decode'] = False\n d_ret['write']['file'] = str_localFile\n d_ret['write']['status'] = True\n d_ret['write']['msg'] = 'File written successfully!'\n d_ret['write']['filesize'] = \"{:,}\".format(os.stat(str_localFile).st_size)\n d_ret['status'] = True\n d_ret['msg'] = d_ret['write']['msg']\n fh.close()\n if b_unpack and d_compress['archive'] == 'zip':\n d_fio = zip_process(action = 'unzip',\n path = str_unpackPath,\n payloadFile = str_localFile)\n d_ret['unzip'] = d_fio\n d_ret['status'] = d_fio['status']\n d_ret['msg'] = d_fio['msg']\n os.remove(str_localFile)\n\n self.send_response(200)\n self.end_headers()\n\n d_ret['User-agent'] = self.headers['user-agent']\n\n # self.ret_client(d_ret)\n self.qprint(d_ret, comms = 'tx')\n\n return d_ret\n\n def ret_client(self, d_ret):\n \"\"\"\n Simply \"writes\" the d_ret using json and the client wfile.\n\n :param d_ret:\n :return:\n \"\"\"\n self.wfile.write(json.dumps(d_ret).encode())\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"\n Handle requests in a separate thread.\n \"\"\"\n\n def col2_print(self, str_left, str_right):\n print(Colors.WHITE +\n ('%*s' % (self.LC, str_left)), end='')\n print(Colors.LIGHT_BLUE +\n ('%*s' % (self.RC, str_right)) + Colors.NO_COLOUR)\n\n def setup(self, **kwargs):\n self.str_fileBase = \"received-\"\n self.LC = 40\n self.RC = 40\n\n self.str_unpackDir = \"/tmp/unpack\"\n self.b_removeZip = False\n self.args = None\n\n self.dp = debug(verbosity=0, level=-1)\n\n for k,v in kwargs.items():\n if k == 'args': self.args = v\n\n print(Colors.LIGHT_CYAN + 
str_desc)\n\n self.col2_print(\"Listening on address:\", self.args['ip'])\n self.col2_print(\"Listening on port:\", self.args['port'])\n self.col2_print(\"Server listen forever:\", self.args['b_forever'])\n print(Colors.LIGHT_GREEN + \"\\n\\n\\tWaiting for incoming data...\" + Colors.NO_COLOUR)\n\ndef zipdir(path, ziph, **kwargs):\n \"\"\"\n Zip up a directory.\n\n :param path:\n :param ziph:\n :param kwargs:\n :return:\n \"\"\"\n str_arcroot = \"\"\n for k, v in kwargs.items():\n if k == 'arcroot': str_arcroot = v\n\n for root, dirs, files in os.walk(path):\n for file in files:\n str_arcfile = os.path.join(root, file)\n if len(str_arcroot):\n str_arcname = str_arcroot.split('/')[-1] + str_arcfile.split(str_arcroot)[1]\n else:\n str_arcname = str_arcfile\n try:\n ziph.write(str_arcfile, arcname = str_arcname)\n except:\n print(\"Skipping %s\" % str_arcfile)\n\ndef zip_process(**kwargs):\n \"\"\"\n Process zip operations.\n\n :param kwargs:\n :return:\n \"\"\"\n\n str_localPath = \"\"\n str_zipFileName = \"\"\n str_action = \"zip\"\n str_arcroot = \"\"\n for k,v in kwargs.items():\n if k == 'path': str_localPath = v\n if k == 'action': str_action = v\n if k == 'payloadFile': str_zipFileName = v\n if k == 'arcroot': str_arcroot = v\n\n if str_action == 'zip':\n str_mode = 'w'\n str_zipFileName = '%s.zip' % uuid.uuid4()\n else:\n str_mode = 'r'\n\n ziphandler = zipfile.ZipFile(str_zipFileName, str_mode, zipfile.ZIP_DEFLATED)\n if str_mode == 'w':\n if os.path.isdir(str_localPath):\n zipdir(str_localPath, ziphandler, arcroot = str_arcroot)\n else:\n if len(str_arcroot):\n str_arcname = str_arcroot.split('/')[-1] + str_localPath.split(str_arcroot)[1]\n else:\n str_arcname = str_localPath\n try:\n ziphandler.write(str_localPath, arcname = str_arcname)\n except:\n ziphandler.close()\n os.remove(str_zipFileName)\n return {\n 'msg': json.dumps({\"msg\": \"No file or directory found for '%s'\" % str_localPath}),\n 'status': False\n }\n if str_mode == 'r':\n ziphandler.extractall(str_localPath)\n ziphandler.close()\n return {\n 'msg': '%s operation successful' % str_action,\n 'fileProcessed': str_zipFileName,\n 'status': True,\n 'path': str_localPath,\n 'zipmode': str_mode,\n 'filesize': \"{:,}\".format(os.stat(str_zipFileName).st_size)\n }\n\ndef base64_process(**kwargs):\n \"\"\"\n Process base64 file io\n \"\"\"\n\n str_fileToSave = \"\"\n str_action = \"encode\"\n data = None\n\n for k,v in kwargs.items():\n if k == 'action': str_action = v\n if k == 'payloadBytes': data = v\n if k == 'payloadFile': str_fileToRead = v\n if k == 'saveToFile': str_fileToSave = v\n if k == 'sourcePath': str_sourcePath = v\n\n if str_action == \"encode\":\n # Encode the contents of the file at targetPath as ASCII for transmission\n if len(str_fileToRead):\n with open(str_fileToRead, 'rb') as f:\n data = f.read()\n f.close()\n data_b64 = base64.b64encode(data)\n with open(str_fileToSave, 'wb') as f:\n f.write(data_b64)\n f.close()\n return {\n 'msg': 'Encode successful',\n 'fileProcessed': str_fileToSave,\n 'status': True\n # 'encodedBytes': data_b64\n }\n\n if str_action == \"decode\":\n bytes_decoded = base64.b64decode(data)\n with open(str_fileToSave, 'wb') as f:\n f.write(bytes_decoded)\n f.close()\n return {\n 'msg': 'Decode successful',\n 'fileProcessed': str_fileToSave,\n 'status': True\n # 'decodedBytes': bytes_decoded\n 
}","repo_name":"FNNDSC/uman","sub_path":"pman/pfioh.py","file_name":"pfioh.py","file_ext":"py","file_size_in_byte":23850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19236100722","text":"#three loops\n#first umber that gets printed and how it gets controlled\ntop= 5\nfor i in range (0,top + 1):\n for x in range (top-i, 0,-1):\n #prints the number\n print(x, end = ' ')\n #printe the space between the lines\n print()\n\n#prime number problem\n#sets start and finish\nstart = 25\nend = 50\n#controlls hw long to run the lines\nfor i in range(start,end):\n if i>1:\n for j in range (2,i):\n if(i % j==0):\n break\n else:\n print(i,end = ' ')\nprint()\nprint()\n\n#sets start varuable\nx=0\ny=1\n#how long you want the program to repete\nfor number in range(5):\n #prints both varuables\n print (x, end = ' ')\n print (y, end = ' ')\n #adds varuables so they can increse but the last number\n x=x+y\n y=x+y\n","repo_name":"Dwalker1000/GameDesign","sub_path":"Unit 1/loopingHW.py","file_name":"loopingHW.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12439350263","text":"import sys\nimport re\nfrom collections import defaultdict\n\n\n# opens text file and look at each char, reconstructs text and returns a list of tokens. \n# runtime: O(n^2) as there is a double for loop checking the validity of each char.\ndef tokenize(s):\n\n tokens = []\n raw_tokens = re.split(r'[^\\w\\d]+', s)\n\n for text in raw_tokens:\n temp = ''\n for char in text:\n if re.match(\"^[A-Za-z0-9]*$\", char):\n temp += char\n if len(temp) > 0:\n tokens.append(temp.lower())\n \n return tokens\n\n\n# count the numbers of each word and returns them in a dictionary \n# runtime: O(n) looks through the list of tokens once\ndef computeWordFrequencies(token_list):\n \n token_count = defaultdict(int)\n\n for t in token_list:\n token_count[t] += 1\n \n return token_count\n\n\n# sorts the dictionary by value decending\n# runtime: O(n) looks through the sorted list of tokens once\ndef frequencies(token_dict):\n\n sorted_token = sorted(token_dict.items(), key = lambda x:x[1], reverse = True)\n \n for pair in sorted_token:\n print(f'{pair[0]} {pair[1]}')\n \n\nif __name__ == \"__main__\":\n token_list = tokenize(1)\n token_dict = computeWordFrequencies(token_list)\n frequencies(token_dict)\n","repo_name":"BenjiBoy926/Inf141_tokenizer","sub_path":"PartA.py","file_name":"PartA.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22342227580","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def hasPathSum(self, root, sum):\n \"\"\"\n :type root: TreeNode\n :type sum: int\n :rtype: bool\n \"\"\"\n if not root:\n return False\n\n stack = [(root, root.val)]\n\n while stack:\n cnode = stack.pop()\n if not (cnode[0].left or cnode[0].right):\n if cnode[1] == sum:\n return True\n\n continue\n\n if cnode[0].left:\n stack.append((cnode[0].left, cnode[0].left.val + cnode[1]))\n\n if cnode[0].right:\n stack.append((cnode[0].right, cnode[0].right.val + cnode[1]))\n\n return 
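A self-contained sketch of the GET wire format the pfioh handler above parses — do_GET pulls `action` and a dict-literal `meta` out of the query string with parse_qsl plus ast.literal_eval, so a client only needs to urlencode str(meta). The paths here are placeholders, not values from this file:

import ast
import urllib.parse

meta = {'remote': {'path': '/tmp/data'},
        'transport': {'checkRemote': True}}
query = urllib.parse.urlencode({'action': 'status', 'meta': str(meta)})

# mirror of the server-side parse at the top of do_GET
parsed = dict(urllib.parse.parse_qsl(query))
assert parsed['action'] == 'status'
assert ast.literal_eval(parsed['meta']) == meta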
False\n","repo_name":"MultiLi/leetcode","sub_path":"112_Path_Sum.py","file_name":"112_Path_Sum.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5614398018","text":"\"\"\"2.Escreva um programa que obtenha um nome de um arquivo texto do \nusuário e crie um processo para executar o programa do sistema Windows bloco de notas (notepad) para abrir o arquivo.\n\"\"\"\nimport os\n\nfilePath =os.path.dirname(os.path.abspath(__file__))\ninput_arq = input('Arquivo txt para procurar - ')\ninput_arqSplit = filePath+\"\\\\\"+input_arq+\".txt\"\n\nif os.path.exists(input_arqSplit):\n os.startfile(input_arqSplit)\nelse:\n print(\"Não se encontrou nenhum arquivo txt este nome\")","repo_name":"HenriqueSamii/Assessment-Desenvolvimento-Python-para-Redes-e-Sistemas-Operacionais","sub_path":"A2.py","file_name":"A2.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22342415880","text":"# Judging if the given input integer is a palindrome number without using extra\n# space.\n# The problem here is all negative integers are not palindrome\n\nclass Solution(object):\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n if x < 0:\n return False\n\n exp = 0\n n = 1\n while x / n:\n n *= 10\n exp += 1\n\n n /= 10\n divider = n\n\n while exp > 1:\n if x / divider != x % ( n / divider * 10) / ( n / divider):\n return False\n x -= x/divider * divider\n divider /= 10\n exp -= 2\n\n return True\n","repo_name":"MultiLi/leetcode","sub_path":"9_Palindrome_Number.py","file_name":"9_Palindrome_Number.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18554121071","text":"import configs\r\nfrom Game_Sudoku import Game_Sudoku\r\n\r\n\r\ndef main(args):\r\n \"\"\"\r\n screen_width: Width of the form\r\n screen_height: Height of the form\r\n \"\"\"\r\n screen_width = args.screen_width\r\n screen_height = args.screen_height\r\n selected_width = args.selected_width\r\n selected_height = args.selected_height\r\n block_gap = args.block_gap\r\n block_size = args.block_size\r\n level = args.level\r\n\r\n game = Game_Sudoku(screen_width, screen_height, selected_width, selected_height,\r\n block_gap, block_size, level)\r\n game.SelectedForm()\r\n # game.Form()\r\n\r\n\r\nif __name__ == '__main__':\r\n args = configs.parse_args()\r\n main(args)","repo_name":"ningbojian/rjgc","sub_path":"a/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16744700818","text":"from src.monthly.ingestor import Ingestor\nfrom src.monthly.dnn import DeepNeuralNetwork\nfrom tqdm import tqdm\n\n# Necessary variables.\nDATA_DIR = 'data'\nraw_feature_file = 'newFred.csv'\nraw_response_file = 'raw_returns.csv'\ntrans_feature_file = 'transformed_features.csv'\ntran_response_file = 'transformed_responses.csv'\n\n# Load raw feature datasets.\ning = Ingestor(trans_feature_file, tran_response_file, DATA_DIR, is_transformed=True)\ning.transform()\n# ing.save_transformations()\n\n# sw = D(feature_data=ing.feature_space_df, response_data=ing.response_df, size_of_train=12)\ndnn = DeepNeuralNetwork(ing.feature_space_df, 
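For contrast with the iterative stack walk in 112_Path_Sum above, the same check reads naturally as a recursion; a sketch, with TreeNode as in that snippet's header comment:

def has_path_sum_recursive(root, total):
    if not root:
        return False
    if not (root.left or root.right):  # leaf: the remaining sum must match
        return total == root.val
    rest = total - root.val
    return (has_path_sum_recursive(root.left, rest) or
            has_path_sum_recursive(root.right, rest))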
ing.response_df)\ndnn.train()\n","repo_name":"dmottice20/principal-try-2","sub_path":"monthly_app.py","file_name":"monthly_app.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9919035464","text":"#!/usr/bin/env python\n\n####################################################################################################\n#\n# Node to control the movement of DENIRO's screen about the Z axis based on VIVE headset orientation\n#\n####################################################################################################\n\nimport rospy\nimport baxter_interface\nfrom baxter_interface import CHECK_VERSION\nimport tf\nimport numpy as np\nimport time\n\n####################################################################################################\n\n\nrospy.init_node(\"move_baxter_head\")\nhead = baxter_interface.Head() # Initialise head\nlistener = tf.TransformListener() # Creates a listener to obtain the transformation data between the tf frames.\naxis = 2 # Select Z axis\n\ndone = False\n\nif __name__ == '__main__':\n\twhile not rospy.is_shutdown():\n\t\ttry: # Attempt to obtain orientation for DENIRO's head based on the HMD tf\n\t\t\t_, quat = listener.lookupTransform('/hmd', 'cal_frame', rospy.Time(0))\n\t\t\tangle = tf.transformations.euler_from_quaternion(quat)[2]\n\n\t\t\t# Setting limits for DENIRO neck rotation.\n\t\t\tdiff = -angle\n\t\t\tthreshold = 1.3\n\t\t\tif diff < -threshold:\n\t\t\t\tdiff = -threshold\n\t\t\telif diff > threshold:\n\t\t\t\tdiff = threshold\n\n\t\t\t# Rotate head\n\t\t\thead.set_pan(diff, speed=1.0, timeout=0)\n\t\texcept (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n\t\t\tcontinue\n\t\ttime.sleep(0.1)","repo_name":"fc2115/Devito","sub_path":"head_pan.py","file_name":"head_pan.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3544749598","text":"import time\nimport csv\n\n# Display instructions\nprint('Press ENTER to begin timing. Afterwards, press ENTER to record each finisher time. Press Ctrl+C to quit.')\ninput() # press Enter to begin\nprint('Started')\nstartTime = time.time()\nfinishNum = 1\ntimes=[]\nfinishers=[]\n\n# start tracking\ntry:\n while True:\n input()\n finishTime = round(time.time()-startTime,3)\n print('Finisher #%s: %s' % (finishNum,finishTime),end='')\n times.append(finishTime)\n finishers.append(finishNum)\n finishNum += 1\nexcept KeyboardInterrupt:\n # Handle the Ctrl-C exception to prevent error from displaying\n print('\\nDone. 
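The neck-rotation limit in head_pan.py is easier to unit-test once pulled out of the ROS loop; a minimal sketch of the same +/-1.3 rad clamp as a pure function:

def clamp_pan(angle, limit=1.3):
    return max(-limit, min(limit, angle))

assert clamp_pan(2.0) == 1.3
assert clamp_pan(-2.0) == -1.3
assert clamp_pan(0.4) == 0.4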
\\nWriting to file \"results.csv\"')\n with open('results.csv','w') as file:\n writer = csv.writer(file, delimiter='\\t')\n writer.writerows(zip(finishers,times))\n \n \n\n \n","repo_name":"tomfaulkenberry/racetimer","sub_path":"racetimer-noGUI.py","file_name":"racetimer-noGUI.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10327471699","text":"\"\"\"\nType annotations for backup-gateway service literal definitions.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_backup_gateway/literals.html)\n\nUsage::\n\n ```python\n from mypy_boto3_backup_gateway.literals import GatewayTypeType\n\n data: GatewayTypeType = \"BACKUP_VM\"\n ```\n\"\"\"\nimport sys\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal\n\n__all__ = (\n \"GatewayTypeType\",\n \"HypervisorStateType\",\n \"ListGatewaysPaginatorName\",\n \"ListHypervisorsPaginatorName\",\n \"ListVirtualMachinesPaginatorName\",\n \"SyncMetadataStatusType\",\n)\n\nGatewayTypeType = Literal[\"BACKUP_VM\"]\nHypervisorStateType = Literal[\"ERROR\", \"OFFLINE\", \"ONLINE\", \"PENDING\"]\nListGatewaysPaginatorName = Literal[\"list_gateways\"]\nListHypervisorsPaginatorName = Literal[\"list_hypervisors\"]\nListVirtualMachinesPaginatorName = Literal[\"list_virtual_machines\"]\nSyncMetadataStatusType = Literal[\"CREATED\", \"FAILED\", \"PARTIALLY_FAILED\", \"RUNNING\", \"SUCCEEDED\"]\n","repo_name":"chrishollinworth/vscode-boto3-intellisense","sub_path":"typings/mypy_boto3_backup_gateway/literals.pyi","file_name":"literals.pyi","file_ext":"pyi","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18639294719","text":"import gym\nfrom skimage import transform \nimport numpy as np \nimport random\nfrom collections import deque, namedtuple\nimport torch\n\n\nclass ProcessObservation(gym.ObservationWrapper):\n '''Pre process observation by converting frame from RGB to grayscale and then resizing it to (84 x 84)\n '''\n def __init__(self, env):\n super(ProcessObservation, self).__init__(env)\n self.observation_space = gym.spaces.Box(low = 0, high = 255, shape = (84,84))\n \n def observation(self, color_frame):\n return ProcessObservation.rgb2grayScaled(color_frame)\n \n @staticmethod\n def rgb2grayScaled(color_frame):\n rgb_weights = [0.2989, 0.5870, 0.1140]\n gray_frame = np.dot(color_frame[...,:3], rgb_weights)\n cropped_frame = gray_frame[25:-12,4:-12]\n cropped_frame = transform.resize(cropped_frame, [84,84])\n return cropped_frame.astype(np.uint8)\n\nclass ClipRewardEnv(gym.RewardWrapper):\n def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)\n\n def reward(self, reward):\n \"\"\"Bin reward to {+1, 0, -1} by its sign.\"\"\"\n return np.sign(reward).astype(np.int16)\n\n\n# Create our environment wrapper to properly skip the frames.\nclass MaxAndSkipEnv(gym.Wrapper):\n\n \"\"\"\n Each action of the agent is repeated over skip frames \n return only every `skip`-th frame\n \"\"\"\n \n def __init__(self, env, skip=3):\n super(MaxAndSkipEnv, self).__init__(env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = deque(maxlen=2)\n self._skip = skip\n\n def step(self, action):\n total_reward = 0\n for _ in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n self._obs_buffer.append(obs)\n total_reward += reward\n if done:\n 
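One portability note on the results writer in racetimer-noGUI.py: csv.writer expects the file opened with newline='' (otherwise Windows inserts blank rows between records), and a comma delimiter matches the .csv extension better than the tab used above. A hedged variant:

import csv

def write_results(finishers, times, path='results.csv'):
    with open(path, 'w', newline='') as fh:
        csv.writer(fh).writerows(zip(finishers, times))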
break\n max_frame = np.max(np.stack(self._obs_buffer), axis=0)\n return max_frame, total_reward, done, info\n\n def reset(self):\n \"\"\"Clear past frame buffer and init to first obs\"\"\"\n self._obs_buffer.clear()\n obs = self.env.reset()\n self._obs_buffer.append(obs)\n return obs\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n This object should only be converted to numpy array before being passed to the model.\n You'd not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n\n def toTorch(self):\n return torch.from_numpy(np.stack(self._frames)) \n\n def __len__(self):\n return len(self.toTorch())\n\n def __getitem__(self, i):\n return self.toTorch()[i]\n \n def shape(self):\n return self.toTorch().shape\n \nclass FrameStack(gym.Wrapper):\n def __init__(self, env, size = 4):\n super(FrameStack, self).__init__(env)\n self.size = size\n self.queue = deque([], maxlen = self.size)\n shp = env.observation_space.shape\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=((size,) + shp), dtype=env.observation_space.dtype)\n\n def reset(self):\n ob = self.env.reset()\n for _ in range(self.size):\n self.queue.append(ob)\n return self._get_ob()\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.queue.append(ob)\n return self._get_ob(), reward, done, info\n \n def _get_ob(self):\n #return torch.from_numpy(np.stack(self.queue, axis = 0))\n return LazyFrames(list(self.queue))\n\nif __name__ == \"__main__\":\n env = gym.make(id='SpaceInvadersNoFrameskip-v4')\n env = FrameStack(ClipRewardEnv(MaxAndSkipEnv(ProcessObservation(env))))\n \n print(\"State size:\\t\", env.observation_space.shape) #The frame size corresponds to the state space. 
\n print(\"Actions:\\t \", env.action_space.n)\n \n print(\"Type of state:\\t\\t\\t\\t\", type(env.reset()))\n print(\"Shape of state as torch tensor:\\t\\t\", env.reset().toTorch().shape)\n print(\"Data type contained in torch tensor:\\t\", env.reset().toTorch().dtype)\n \n def random_play(close_environment = True):\n score = np.array(0).astype(np.int16)\n env.reset()\n while True:\n action = env.action_space.sample()\n state, reward, done, _ = env.step(action)\n score += reward\n if done:\n print(\"Your Score at end of game is: \", score)\n break\n if close_environment:\n env.close()\n \n random_play()","repo_name":"theabm/SpaceInvadersDDQN","sub_path":"Code/EnvWrapper.py","file_name":"EnvWrapper.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27064692140","text":"from RpiMotorLib import RpiMotorLib\nfrom picamera import PiCamera\nimport RPi.GPIO as GPIO\nimport datetime\nimport time\ncamera = PiCamera()\n\ni=1\nn=10\n# Steps:\n# Rotate Stepper 180 degrees\n# Take a photo and save to a folder (numbered numerically)\n# Go to first step\n\n### --- Photo Name and Directory Location ---\n# p = 0 # Initial value for number of photos taken\ndir_pic = '/home/pi/DiceRoller9000/DicePics/' # Directory to dump photos\n# pic_name = '' # File name format - KEEP '%s.jpg'\n### --- Photo Name and Directory Location ---\n\n### --- RPi Motor Pin Config ---\ndirection = 22\nstep = 23\nEN_pin = 24\nSleep_pin = 27\nmotor = RpiMotorLib.A4988Nema(direction, step, (21,21,21), \"DRV8825\")\nGPIO.setup(EN_pin, GPIO.OUT)\nGPIO.setup(Sleep_pin, GPIO.OUT)\n### --- RPi Motor Pin Config\n\ntime.sleep(1)\n\nGPIO.output(Sleep_pin, GPIO.HIGH)\n\ndef motorspin():\n\twhile True:\n\t\tGPIO.output(EN_pin, GPIO.HIGH)\n\t\ttime.sleep (2)\n\t\tcamera.capture(\"/home/pi/DiceRoller9000/Dice_Imgs/dice_pic_\"+str(i)+\".jpg\")\n\t\tprint(\"pic clicked\"+str(i))\n\t\tmotor.motor_go(True, \"1/4\", 100, 0.005, False, 0.05)\t\n\t\ttime.sleep(5)\n\t\tmotor.motor_go(False, \"1/4\", 100, 0.005, False, 0.05)\n\t\ttime.sleep (5)\n\t\tGPIO.output(EN_pin, GPIO.HIGH)\n\t\ndef motorstop():\n\tGPIO.output(EN_pin, GPIO.LOW)\n\t\t\n\t\n\t\n\ntry:\n\tmotorspin()\n\ttime.sleep(10000)\n\tmotorstop()\n\n\t\t\n\t\t\n \n \n \n \nexcept KeyboardInterrupt:\n\tGPIO.cleanup()\n\n\n","repo_name":"Rach-8/Dice-Roller-CV","sub_path":"Steppter_Motor.py","file_name":"Steppter_Motor.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36112794791","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 27 18:34:32 2016\n\n@author: Andrew\ngithub: Andrew62\n\"\"\"\n\nimport os\nimport pickle\nimport numpy as np\nfrom .config import workspace\nfrom multiprocessing import Queue\nfrom .multiprocessing_transformer import MPTransformer\n\ndef pkl_dump(obj, fname):\n with open(fname, 'wb') as target:\n pickle.dump(obj, target, pickle.HIGHEST_PROTOCOL)\n\n \ndef format_array(classes, new_folders):\n print('formatting img ref array...')\n data = []\n for folder in new_folders:\n files = [x for x in os.listdir(folder) if x.endswith('jpg')]\n label = os.path.basename(folder)\n for img in files:\n data.append([label, os.path.join(folder, img)])\n \n return np.asarray(data)\n \ndef split_data(img_data_array, splits, perms=2):\n idx = 0\n total_rows = img_data_array.shape[0]\n increment = lambda x, percent: x+int(total_rows*percent)\n \n for _ in range(perms):\n 
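A ROM-free sanity check of the preprocessing path in EnvWrapper.py: feed a synthetic 210x160 RGB frame (the native Atari frame size) through the static grayscale/crop/resize helper. A sketch assuming numpy and scikit-image are installed:

import numpy as np

frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)
out = ProcessObservation.rgb2grayScaled(frame)
assert out.shape == (84, 84) and out.dtype == np.uint8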
img_data_array = np.random.permutation(img_data_array)\n \n for subset, percent in splits.items():\n step = increment(idx, percent)\n splits[subset] = img_data_array[idx:step]\n idx = step\n return splits\n\n\nif __name__ == \"__main__\":\n PICKLE_DIR = workspace.pickle_dir\n DATA_DIR = workspace.data_dir\n OUT_DIR = workspace.out_dir\n OUT_CSV = workspace.out_csv\n PROCESSES = 12\n PIXELS = 224\n q = Queue()\n TRANSFORM = True\n new_folders = []\n processes = []\n classes = set()\n\n for i in range(PROCESSES):\n process = MPTransformer(str(i), q, PIXELS, transform=TRANSFORM)\n processes.append(process)\n process.daemon = True\n process.start()\n \n for raw_folder in os.listdir(DATA_DIR):\n if raw_folder.startswith('.'):\n continue\n indir = os.path.join(DATA_DIR, raw_folder)\n\n # Catching the one special case\n if raw_folder == 'Cirneco dell’Etna':\n raw_folder = 'Etna Cirneco'\n\n outdir = os.path.join(OUT_DIR, raw_folder)\n new_folders.append(outdir)\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n job = {'name':raw_folder, 'indir':indir, 'outdir':outdir}\n \n # collect unique image labels\n classes.add(raw_folder)\n \n q.put(job)\n \n for p in processes:\n p.join()\n \n print(\"makeing train, test, validation split...\")\n img_data_array = format_array(classes, new_folders)\n \n data_split_percentage = {'train': 0.9, 'test': 0.05, 'valid': 0.05}\n \n data_splits = split_data(img_data_array, data_split_percentage)\n \n print(\"Saving...\")\n for name, arr in data_splits.items():\n path = os.path.join(PICKLE_DIR, \"{0}.pkl\".format(name))\n pkl_dump(arr, path)\n print(name, arr.shape)\n class_path = os.path.join(PICKLE_DIR, 'classes.pkl')\n pkl_dump(list(classes), class_path)\n\n print(\"complete!\")\n","repo_name":"Andrew62/dogcatcher","sub_path":"data_formatting_scripts/mp_image_prep.py","file_name":"mp_image_prep.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13373315748","text":"import argparse\nimport json\nimport logging\nimport os\nimport multiprocessing\nimport re\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom math import ceil\nfrom pathlib import Path\nfrom typing import Dict, Union, Optional, List\n\nimport datasets\nfrom dotenv import load_dotenv\nfrom numpy import log10\nfrom numpy.random import default_rng, SeedSequence\n\nfrom datasets import concatenate_datasets, load_dataset, utils, Features, Value, Dataset\n\nlogger = logging.getLogger()\nhandler = logging.StreamHandler()\nformatter = logging.Formatter(\"%(asctime)s - %(name)-15s - %(levelname)-8s - %(message)s\")\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n # Load\n parser.add_argument(\n \"--dataset_ratios_path\",\n type=str,\n required=True,\n help=\"path to JSON file containing input dataset ratios. 
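A tiny dry run of split_data from mp_image_prep.py: with only ten rows and the 90/5/5 split used in __main__, the int() truncation in `increment` leaves the test and valid subsets empty — worth knowing before pointing the script at a small dataset. Sketch:

import numpy as np

demo = np.array([['label%d' % i, 'img%d.jpg' % i] for i in range(10)])
parts = split_data(demo, {'train': 0.9, 'test': 0.05, 'valid': 0.05})
print({name: arr.shape for name, arr in parts.items()})
# -> train (9, 2), test (0, 2), valid (0, 2)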
Values ares dictionary: {'dataset_path': str, 'is_catalogue': bool, 'ratio': float}\",\n )\n parser.add_argument(\"--split\", type=str, default=\"train\", help=\"split name, default 'train'\")\n parser.add_argument(\n \"--load_num_proc\", type=int, default=1, help=\"number of procs to use for loading datasets, default 1\"\n )\n # Shard\n parser.add_argument(\"--shard_max_size\", type=int, default=10_000_000_000, help=\"max shard size, default 10GB\")\n # Save\n parser.add_argument(\"--save_path\", type=str, default=\".\", help=\"path to save the dataset, default '.'\")\n parser.add_argument(\"--save_num_proc\", type=int, default=1, help=\"number of procs to use for saving, default 1\")\n parser.add_argument(\"--save_batch_size\", type=int, help=\"batch size used for saving\")\n # Parse args\n args = parser.parse_args()\n # Post-process args\n args.dataset_ratios_path = Path(args.dataset_ratios_path)\n args.save_path = Path(args.save_path)\n return args\n\n\ndef convert_types(features):\n if isinstance(features, dict) and \"_type\" in features:\n return getattr(datasets, features[\"_type\"])(features[\"dtype\"])\n elif isinstance(features, dict):\n return {key: convert_types(value) for key, value in features.items()}\n elif isinstance(features, list):\n return [convert_types(value) for value in features]\n\n\ndef get_features():\n features = {\n \"HtmlPreprocessor_error\": {\"dtype\": \"int64\", \"id\": None, \"_type\": \"Value\"},\n \"HtmlPreprocessor_error_comment\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"content_languages\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"content_mime_detected\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"depth\": {\"dtype\": \"int16\", \"id\": None, \"_type\": \"Value\"},\n \"download_exception\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"external_urls\": [{\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"}],\n \"fetch_redirect\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"fetch_status\": {\"dtype\": \"int32\", \"id\": None, \"_type\": \"Value\"},\n \"fetch_time\": {\"dtype\": \"timestamp[ns]\", \"id\": None, \"_type\": \"Value\"},\n \"html_error\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"html_footer\": [{\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"}],\n \"html_head\": [{\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"}],\n \"html_str\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"html_title\": [{\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"}],\n \"metadata_html\": [\n {\n \"char_end_idx\": {\"dtype\": \"int64\", \"id\": None, \"_type\": \"Value\"},\n \"char_start_idx\": {\"dtype\": \"int64\", \"id\": None, \"_type\": \"Value\"},\n \"html_attrs\": {\n \"attrs\": [{\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"}],\n \"values\": [{\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"}],\n },\n \"key\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"relative_end_pos\": {\"dtype\": \"int64\", \"id\": None, \"_type\": \"Value\"},\n \"relative_start_pos\": {\"dtype\": \"int64\", \"id\": None, \"_type\": \"Value\"},\n \"type\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"value\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n }\n ],\n \"seed_id\": {\"dtype\": \"int32\", \"id\": None, \"_type\": \"Value\"},\n \"text\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"url\": 
{\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"url_host_name\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"url_host_registered_domain\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"url_host_tld\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"url_surtkey\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"warc_filename\": {\"dtype\": \"string\", \"id\": None, \"_type\": \"Value\"},\n \"warc_record_length\": {\"dtype\": \"int32\", \"id\": None, \"_type\": \"Value\"},\n \"warc_record_offset\": {\"dtype\": \"int32\", \"id\": None, \"_type\": \"Value\"},\n }\n return Features(convert_types(features))\n\n\ndef collapse_meta_(batch):\n \"\"\"{\"text\": str, \"meta\": str}\"\"\"\n # TODO: check that\n columns_not_in_meta = [\"text\", \"html_error\", \"html_footer\", \"html_head\", \"html_str\", \"html_title\", \"metadata_html\"]\n columns_to_collapse = [name for name in batch.keys() if name not in columns_not_in_meta]\n\n number_of_rows = len(batch[\"text\"])\n metas = [\n {\n **{name: batch[name][i] for name in columns_to_collapse},\n \"source_dataset\": f\"pseudo-crawl--{batch['seed_id'][i]}\",\n }\n for i in range(number_of_rows)\n ]\n\n new_batch = {\"text\": batch[\"text\"], \"meta\": [str(meta) for meta in metas]}\n return new_batch\n\n\ndef collapse_meta(ds: Dataset, num_proc: int):\n \"\"\"{\"text\": str, \"meta\": str}\"\"\"\n columns_to_keep = [\"text\"]\n column_names_to_remove = [name for name in ds.column_names if name not in columns_to_keep]\n return ds.map(collapse_meta_, batched=True, num_proc=num_proc, remove_columns=column_names_to_remove)\n\n\ndef process_single_catalogue_meta_(meta: Optional[Union[str, Dict]], source_dataset) -> str:\n if meta is None:\n meta = {}\n elif isinstance(meta, str):\n meta = eval(meta)\n try:\n meta[\"source_dataset\"] = source_dataset\n except:\n raise ValueError(f\"Got {meta} of type {type(meta)}. Expected an dictionary. 
This is from {source_dataset}\")\n return str(meta)\n\n\ndef process_catalogue_meta(batch, source_dataset=None, columns_not_in_meta_or_text=None):\n num_elts = len(batch[next(iter(batch.keys()))])\n default_meta = process_single_catalogue_meta_(None, source_dataset)\n\n # If other columns exist we put them into meta\n if columns_not_in_meta_or_text:\n if \"meta\" not in batch:\n batch[\"meta\"] = [{} for _ in range(num_elts)]\n batch[\"meta\"] = [\n {\n **(batch[\"meta\"][index]),\n **{column_name: batch[column_name][index] for column_name in columns_not_in_meta_or_text}\n }\n for index in range(num_elts)\n ]\n\n if \"meta\" in batch:\n batch[\"meta\"] = [process_single_catalogue_meta_(meta, source_dataset) for meta in batch[\"meta\"]]\n else:\n batch[\"meta\"] = [default_meta for _ in range(num_elts)]\n\n return {\"text\": batch[\"text\"], \"meta\": batch[\"meta\"]}\n\ndef load_single_dataset(args):\n try:\n ds_ratio, split, seed, num_proc = args\n ds_name = ds_ratio[\"dataset_path\"]\n ratio = ds_ratio[\"ratio\"]\n is_catalogue = ds_ratio[\"is_catalogue\"]\n # Load\n if is_catalogue:\n ds = load_dataset(ds_name, use_auth_token=True, ignore_verifications=True)\n else:\n # We assume it comes from pseudo crawl.\n # Pseudo crawl needs to be downloaded locally beforehand.\n features = get_features()\n dataset_path = Path(ds_name)\n ds = load_dataset(\n str((dataset_path / \"text__html\").absolute()), data_files=\"**.jsonl.gz\", features=features\n )\n # Split\n if split not in ds:\n logger.info(f\"No split named {split} in dataset {ds_name}\")\n return\n ds = ds[split]\n\n # Sample dataset\n if ratio < 1:\n num_samples = int(len(ds) * ratio)\n if num_samples == 0:\n return None\n rng = default_rng(seed)\n indices = rng.choice(len(ds), size=num_samples, replace=False, shuffle=False)\n ds = ds.select(indices)\n\n # Process meta: add source_dataset and cast dict to str\n if is_catalogue:\n columns_not_in_meta_or_text = [column_name for column_name in ds.column_names if column_name not in [\"text\", \"meta\"]]\n source_dataset = re.match(r\".*bigscience-catalogue-lm-data/(lm_([^/])*)(/data)?\", ds_name).group(1)\n ds = ds.map(\n partial(process_catalogue_meta, source_dataset=source_dataset, columns_not_in_meta_or_text=columns_not_in_meta_or_text),\n batched=True,\n num_proc=num_proc,\n desc=f\"Processing {ds_name}\",\n remove_columns=columns_not_in_meta_or_text\n )\n else:\n # collapse all meta data in \"meta\" column\n ds = collapse_meta(ds, num_proc=num_proc)\n\n return ds\n except BaseException as err:\n logger.error(f\"Error while loading dataset {ds_name}\")\n raise err\n\n\ndef compute_number_of_shards(ds, max_size=10_000_000_000):\n ds_nbytes = get_size(ds)\n logger.info(f\"Estimated dataset size: {ds_nbytes} bytes\")\n logger.info(f\"Max shard size: {max_size} bytes\")\n number_shards = ceil(ds_nbytes / max_size)\n return number_shards if number_shards < len(ds) else len(ds)\n\ndef get_shard(shard_id: int, number_shards: int, ds: Dataset) -> Dataset:\n logger.info(f\"Shard {shard_id}/{number_shards}\")\n shard = ds.shard(num_shards=number_shards, index=shard_id, contiguous=True)\n return shard\n\ndef shard_dataset(ds, num_proc, max_size=10_000_000_000):\n number_shards = compute_number_of_shards(ds, max_size=max_size)\n if number_shards <= 1:\n return [ds]\n logger.info(f\"Shard dataset in {number_shards} shards\")\n shards = []\n for shard_id in range(number_shards):\n shard = get_shard(shard_id=shard_id, number_shards=number_shards, ds=ds)\n shards.append(shard)\n\n # # Parallel 
version\n # with multiprocessing.Pool(min(number_shards, num_proc)) as pool:\n # shards = [\n # ds\n # for ds in utils.tqdm(\n # pool.imap(\n # partial(get_shard, ds=ds, number_shards=number_shards),\n # range(number_shards),\n # ),\n # total=number_shards,\n # unit=\"ba\",\n # disable=bool(utils.logging.get_verbosity() == utils.logging.NOTSET),\n # desc=\"Sharding dataset\",\n # )\n # if ds is not None\n # ]\n\n return shards\n\n\ndef save_shards(shards, path=Path(\".\"), num_proc=1, batch_size=None):\n path.mkdir(parents=True, exist_ok=True)\n num_shards = len(shards)\n # for i, shard in enumerate(shards):\n # save_dataset(shard, path=path, shard_id=i, num_shards=num_shards, num_proc=num_proc, batch_size=batch_size)\n\n # Parallel version\n with multiprocessing.Pool(min(num_shards, num_proc)) as pool:\n pool.starmap(\n save_dataset,\n [\n (shard, path, shard_id, num_shards, 1, batch_size)\n for shard_id, shard in enumerate(shards)\n ]\n )\n\n\ndef save_dataset(shard: Dataset, path=Path(\".\"), shard_id=0, num_shards=1, num_proc=1, batch_size=None):\n width = int(log10(num_shards)) + 1\n save_path = path / f\"shard-{shard_id:0>{width}}-of-{num_shards:0>{width}}.jsonl.gz\"\n if save_path.exists():\n logger.info(f\"Shard was already saved: {save_path}\")\n return\n with tmp_path(save_path) as tmp_save_path:\n shard.to_json(\n tmp_save_path,\n num_proc=num_proc,\n batch_size=batch_size,\n )\n\n\n@contextmanager\ndef tmp_path(path):\n try:\n tmp_path = path.with_name(f\"tmp-{path.name}\")\n yield tmp_path\n except:\n tmp_path.unlink(missing_ok=True)\n else:\n tmp_path.rename(path)\n\ndef get_size(ds: Dataset) -> int:\n if ds._indices is not None:\n return ds.data.nbytes * len(ds._indices) / len(ds.data)\n else:\n return ds.data.nbytes\n\ndef load_datasets(dset_ratios: List, num_proc: int, split: str, seed: SeedSequence) -> List[Dataset]:\n logger.info(\"Start load_datasets\")\n # dsets = [\n # ds\n # for ds in utils.tqdm(\n # [\n # load_single_dataset((dset_ratio, split, child_seed, num_proc))\n # for dset_ratio, child_seed in zip(dset_ratios, seed.spawn(len(dset_ratios)))\n # ],\n # total=len(dset_ratios),\n # unit=\"ba\",\n # disable=bool(utils.logging.get_verbosity() == utils.logging.NOTSET),\n # desc=\"Loading dataset\",\n # )\n # if ds is not None\n # ]\n\n # Parallel version\n with multiprocessing.Pool(num_proc) as pool:\n dsets = [\n ds\n for ds in utils.tqdm(\n pool.imap(\n load_single_dataset,\n [\n (dset_ratio, split, child_seed, 1)\n for dset_ratio, child_seed in zip(dset_ratios, seed.spawn(len(dset_ratios)))\n ],\n ),\n total=len(dset_ratios),\n unit=\"ba\",\n disable=bool(utils.logging.get_verbosity() == utils.logging.NOTSET),\n desc=\"Loading dataset\",\n )\n if ds is not None\n ]\n return dsets\n\n\ndef main():\n args = parse_args()\n\n # Init\n # Env variables\n if Path(\".env\").exists:\n load_dotenv()\n # Random generator\n seed = SeedSequence(42)\n # Read dataset ratios\n with args.dataset_ratios_path.open() as f:\n dset_ratios = json.load(f)\n # Load datasets\n dsets = load_datasets(dset_ratios, args.load_num_proc, args.split, seed)\n\n if not dsets:\n logger.info(f\"No datasets to be aggregated\")\n return\n # Concatenate datasets\n logger.info(\"Start concatenate_datasets\")\n dset = concatenate_datasets(dsets, split=args.split)\n del dsets\n logger.info(f\"Estimated size: {get_size(dset)} bytes\")\n # Shuffle\n logger.info(\"Start shuffle dataset\")\n dset = dset.shuffle(seed=seed)\n # Shard\n logger.info(\"Start shard_dataset\")\n shards = shard_dataset(dset, 
num_proc=args.load_num_proc, max_size=args.shard_max_size)\n # Save\n logger.info(\"Start: save dataset\")\n save_shards(shards, path=args.save_path, num_proc=args.save_num_proc, batch_size=args.save_batch_size)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bigscience-workshop/data-preparation","sub_path":"sourcing/Gathering Identified Datasets and Collections/aggregate_datasets.py","file_name":"aggregate_datasets.py","file_ext":"py","file_size_in_byte":15050,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"81"} +{"seq_id":"16120510357","text":"from flask_restful import Resource, fields, abort, reqparse\nfrom models.client_product_model import ClientProduct\nfrom db.db import session\nfrom modules.json_serializator import encode_json\nformular_fields = {\n 'id': fields.Integer,\n 'name': fields.String,\n}\n\nparser = reqparse.RequestParser()\n\nclass ClientProductModelResource(Resource):\n def get(self, id):\n try:\n result = ClientProduct()\n result.init_empty_model(id)\n\n return encode_json(result)\n except Exception as e:\n session.rollback()\n abort(400, message=\"Error while deleting record products\")\n\n","repo_name":"vyadzmak/Landau.X.Api","sub_path":"res/client_product_model_resources.py","file_name":"client_product_model_resources.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26439579177","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\n\n# Third-party, non-Django\nfrom taggit.managers import TaggableManager\n\n\nclass PublishedManager(models.Manager):\n def get_queryset(self):\n return super().get_queryset().filter(status=Post.Status.PUBLISHED)\n\n\nclass Post(models.Model):\n # Another pattern for establishing enumeration-style choices\n # https://docs.djangoproject.com/en/4.1/ref/models/fields/#enumeration-types\n class Status(models.TextChoices):\n DRAFT = \"DF\", \"Draft\"\n PUBLISHED = \"PB\", \"Published\"\n\n title = models.CharField(max_length=250)\n slug = models.SlugField(max_length=250, unique_for_date=\"publish\")\n author = models.ForeignKey(\n to=User, on_delete=models.CASCADE, related_name=\"blog_posts\"\n )\n body = models.TextField()\n publish = models.DateTimeField(default=timezone.now)\n created = models.DateTimeField(auto_now_add=True)\n update = models.DateTimeField(auto_now=True)\n status = models.CharField(\n max_length=2, choices=Status.choices, default=Status.DRAFT\n )\n\n objects: models.Manager = models.Manager()\n published: PublishedManager = PublishedManager()\n tags: TaggableManager = TaggableManager()\n\n # Django pattern: nested class for defining metadata\n # https://docs.djangoproject.com/en/4.1/topics/db/models/#meta-options\n # https://docs.djangoproject.com/en/4.1/ref/models/options/\n class Meta:\n # Blogs typically order by latest published...\n ordering = [\"-publish\"]\n\n indexes = [models.Index(fields=[\"-publish\"])]\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse(\n viewname=\"blog:post_detail\",\n args=[self.publish.year, self.publish.month, self.publish.day, self.slug],\n )\n\n\nclass Comment(models.Model):\n post: models.ForeignKey = models.ForeignKey(\n Post, on_delete=models.CASCADE, related_name=\"comments\"\n )\n\n name = models.CharField(max_length=80)\n email = models.EmailField()\n body = models.TextField()\n 
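A hedged example of the --dataset_ratios_path payload that aggregate_datasets.py consumes — a JSON list of {'dataset_path', 'is_catalogue', 'ratio'} entries. Both dataset paths below are placeholders; catalogue names must match the bigscience-catalogue-lm-data/lm_* pattern that the regex extracts source_dataset from:

import json

dset_ratios = [
    {"dataset_path": "bigscience-catalogue-lm-data/lm_en_example",
     "is_catalogue": True, "ratio": 0.5},
    {"dataset_path": "/data/pseudo_crawl/seed-42",
     "is_catalogue": False, "ratio": 1.0},
]
with open("ratios.json", "w") as fh:
    json.dump(dset_ratios, fh, indent=2)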
created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n active = models.BooleanField(default=True)\n\n class Meta:\n ordering = [\"created\"]\n indexes = [models.Index(fields=[\"created\"])]\n\n def __str__(self) -> str:\n return f\"Comment by {self.name} on {self.post}\"\n","repo_name":"ajpotts01/django_blog","sub_path":"django_blog/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14775201088","text":"# -*- coding: utf-8 -*-\n\nfrom flectra import models, fields, api\nimport hashlib \nfrom werkzeug import urls\nimport logging\nfrom flectra import api, fields, models, _\nfrom flectra.addons.payment.models.payment_acquirer import ValidationError\nfrom flectra.tools.float_utils import float_compare\n\n_logger = logging.getLogger(__name__)\n\nclass AcquirerVCSWeb(models.Model):\n _inherit = 'payment.acquirer'\n\n provider = fields.Selection(selection_add=[('vcsweb', 'VCSWeb')])\n vcsweb_md5_secret = fields.Char('Shared MD5 Secret',help=\"Optional shared secret for use in hash token\", required_if_provider='vcsweb', groups='base.group_user')\n vcsweb_terminal_id = fields.Char('Terminal ID', help=\"Your VCS terminal id\", required_if_provider='vcsweb', groups='base.group_user')\n vcsweb_personal_authentication_message = fields.Char(\"PAM\",help=\"Your Personal Authentication Message\",required_if_provider='vcsweb', groups='base.group_user')\n _approved_url=\"/payment/vcsweb/approved\"\n _declined_url=\"/payment/vcsweb/declined\"\n _cancelled_url=\"/payment/vcsweb/cancelled\"\n\n def _calculate_vcsweb_hash(self,inout,values):\n\n if inout not in ('in', 'out'):\n raise Exception(\"Type must be 'in' or 'out'\")\n\n if inout == \"out\":\n params=values[\"terminal_id\"]+values[\"tx_reference_no\"]+values[\"tx_description\"]+values[\"tx_amount\"]+values[\"tx_currency\"]\n params+=values[\"cancelled_url\"]+values['customer_email'] +values[\"return_url\"]+ values[\"customer_id\"]\n params+=self.vcsweb_md5_secret\n else:\n keys = ['p1','p2','p3','p4','p5','p6','p7','p8','p9','p10','p11','p12',\"pam\",\"m_1\",\"CardHolderIpAddr\",\"CardholderIpAddr\",\"MaskedCardNumber\",\"TransactionType\",\"CustomerID\",'MerchantToken']\n params = ''\n for key in keys:\n if key == \"pam\":\n params=params+self.vcsweb_personal_authentication_message\n elif key in values:\n params = params + values[key]\n params= params + self.vcsweb_md5_secret\n return hashlib.md5(params.encode()).hexdigest()\n\n\n def _get_vcsweb_urls(self):\n return {\n 'vcsweb_form_url': 'https://www.vcs.co.za/vvonline/vcspay.aspx',\n } \n\n @api.multi\n def vcsweb_form_generate_values(self, values):\n #Should get url dynamically but doesn't appear to work for multi-site\n #base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')\n base_url = \"https://cyberconnect.shop\"\n #base_url=\"http://35.200.126.168:7073\"\n values.update({\n 'tx_reference_no': values['reference'],\n 'tx_amount': '%.2f' % values['amount'],\n 'tx_description': \"Goods from Cyber Connect\",\n 'tx_currency': 'ZAR',\n 'terminal_id': self.vcsweb_terminal_id,\n 'urls_provided': 'Y',\n 'approved_url': urls.url_join(base_url, self._approved_url),\n 'declined_url': urls.url_join(base_url, self._declined_url),\n 'cancelled_url': urls.url_join(base_url, self._cancelled_url),\n 'customer_id': \"%d\" % values.get('partner_id',\"\"),\n 'customer_email': values.get('partner_email', ''),\n 
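A short usage sketch of the custom manager and taggit field defined in the blog models above; this assumes a configured Django project with the app installed (run it in a Django shell — the tag name is illustrative):

# PublishedManager filters to status == Status.PUBLISHED automatically
recent = Post.published.filter(tags__name__in=['django'])[:5]
for post in recent:
    print(post.get_absolute_url(), post.comments.filter(active=True).count())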
'customer_name': values.get('partner_name', ''),\n 'return_url': values.get(\"return_url\"),\n }) \n values['tx_hash'] = self._calculate_vcsweb_hash(\"out\",values)\n return values\n\n @api.multi\n def vcsweb_get_form_action_url(self):\n return self._get_vcsweb_urls()['vcsweb_form_url']\n\nclass TxVCSWeb(models.Model):\n _inherit = 'payment.transaction'\n\n @api.model\n def _vcsweb_form_get_tx_from_data(self, data):\n #if controller is called directly.\n reference = data.get('p2')\n if not reference:\n error_msg = _('VCSWeb: no transactional reference received') \n raise ValidationError(error_msg)\n tx = self.search([(\"reference\",\"=\",reference)])\n #did we find the transaction?\n if not tx:\n error_msg = _('VCSWeb: received data for reference %s; no order found') % (reference)\n raise ValidationError(error_msg)\n elif len(tx) > 1:\n error_msg = _('VCSWeb: received data for reference %s; multiple orders found') % (reference)\n raise ValidationError(error_msg)\n #check hash - just redisplay transaction\n if data.get(\"p3\",\"\") == '~MD5 Hash mismatch':\n _logger.info(\"acquirer found hash mismatch on input data\")\n return tx\n hash = data.get(\"Hash\",None)\n if hash:\n calculated_hash = tx.acquirer_id._calculate_vcsweb_hash('in', data)\n if hash.upper() != calculated_hash.upper():\n error_msg = _('VCSWeb: invalid hash, received %s, computed %s, for data %s') % (hash, calculated_hash, data)\n _logger.info(error_msg)\n raise ValidationError(error_msg)\n return tx\n\n def _vcsweb_form_validate(self, data):\n status = data.get('p3', 'CANCELLED')\n \n if \"APPROVED\" in status:\n self.write({\n 'state': 'done',\n 'state_message': status,\n 'acquirer_reference': data.get('Uti'),\n })\n return True\n elif \"CANCELLED\" in status:\n self.write({\n 'state': 'cancel',\n 'state_message': status,\n #'acquirer_reference': data.get('Uti'),\n })\n #return True\n else:\n self.write({\n 'state': 'error',\n 'state_message': status,\n 'acquirer_reference': data.get('Uti'),\n })\n return False\n \n\n\n def _vcsweb_form_get_invalid_parameters(self, data):\n invalid_parameters = []\n #don't know why we would already have the acquirer reference but all the built in \n #addons do this test so just following tradition.\n if self.acquirer_reference and data.get('Uti') != self.acquirer_reference:\n invalid_parameters.append(('Uti', data.get('Uti'), self.acquirer_reference))\n\n #check if the reference we sent is what we get back\n if self.reference != data.get(\"p2\"):\n invalid_parameters.append(('p2', data.get('p2'), self.reference))\n\n #other tests CustomerID?\n\n #another common check\n if 'p6' in data and float_compare(float(data['p6']), self.amount, 2) != 0:\n invalid_parameters.append(\n ('Amount', data.get('p6'), '%.2f' % self.amount))\n\n return invalid_parameters\n","repo_name":"jumping-bean/flectra-addon-vcs","sub_path":"payment_vcsweb/models/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
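The acquirer record above builds its outgoing hash token by concatenating the transaction fields in a fixed order and appending the shared MD5 secret. A minimal standalone sketch of that "out" computation, with made-up field values (the field names mirror the dict built in `vcsweb_form_generate_values`; the secret is a placeholder):

```python
import hashlib

def vcs_out_hash(values, md5_secret):
    # Same concatenation order as the 'out' branch of _calculate_vcsweb_hash.
    params = (values["terminal_id"] + values["tx_reference_no"]
              + values["tx_description"] + values["tx_amount"]
              + values["tx_currency"] + values["cancelled_url"]
              + values["customer_email"] + values["return_url"]
              + values["customer_id"] + md5_secret)
    return hashlib.md5(params.encode()).hexdigest()

demo = {"terminal_id": "1234", "tx_reference_no": "SO042",
        "tx_description": "Goods", "tx_amount": "10.00", "tx_currency": "ZAR",
        "cancelled_url": "https://example.com/cancel",
        "customer_email": "a@example.com",
        "return_url": "https://example.com/return", "customer_id": "7"}
print(vcs_out_hash(demo, "not-a-real-secret"))
```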
{"seq_id":"2242033708","text":"from django.db import models\n# from django.contrib.auth.models import User\nfrom django.db.models.deletion import CASCADE\n\n#abstract user model\nfrom django.contrib.auth.models import AbstractUser\n\nclass User(AbstractUser):\n name = models.CharField(max_length=200, null=True)\n email = models.EmailField(unique=True, error_messages= { 'required':\"You must enter your email\"})\n bio = models.TextField(null=True)\n avatar = models.ImageField(null=True, default='avatar.png')\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n\n\n#================================Create your models here.\n\nclass Topic(models.Model):\n name = models.CharField(max_length=200)\n\n def __str__(self):\n return self.name\n#======================create a table class\nclass Room(models.Model):\n host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n topic = models.ForeignKey(Topic, on_delete=models.SET_NULL, null=True)\n name = models.CharField(max_length=200) \n participants = models.ManyToManyField(User, related_name='participants', blank=True) #creates a many-to-many relationship for users\n description = models.TextField(null=True, blank=True)\n email = models.CharField(max_length=255, null=False, default='')\n updated = models.DateTimeField(auto_now=True)\n created = models.DateTimeField(auto_now_add=True)\n\n#=====================order values by (date created/update)\n class Meta:\n ordering = ['-updated', '-created']\n\n #=====================pass string representation\n def __str__(self):\n return self.name\n\n\nclass Message(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, default=1)\n#=================delete user's content if room is deleted i.e. using CASCADE\n room = models.ForeignKey(Room, on_delete=models.CASCADE)\n body = models.TextField()\n \n updated = models.DateTimeField(auto_now=True)\n created = models.DateTimeField(auto_now_add=True)\n \n #======================order values by (date created/update)\n class Meta:\n ordering = ['-updated', '-created']\n\n def __str__(self):\n \n return self.body[0:50]\n \n \n","repo_name":"allanjade/roomsApp","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
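Because the models.py above replaces the default user with an `AbstractUser` subclass keyed on email, the project settings must point at it before the first migration. A sketch, assuming the app is installed under the label `base`:

```python
# settings.py (sketch; "base" is an assumed app label)
AUTH_USER_MODEL = "base.User"

# With USERNAME_FIELD = 'email' and REQUIRED_FIELDS = [], login and
# createsuperuser prompt for the email address instead of a username:
#   from django.contrib.auth import authenticate
#   user = authenticate(request, email="jane@example.com", password="...")
```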
{"seq_id":"41022338174","text":"from pathlib import Path\n\n\nN_CUPS = 1e7 # total number of cups to deliver\nMAX_TIME = 1440 # maximum time to deliver the cargo, in minutes (i.e. 24 hours)\nCUP_WEIGHT = 100 # weight of one cup in grams\nTRUCK_WEIGHT = 3e6 # weight of the truck in grams\nLOCAL_FILE = True\n\n# read the data from the file and build the graph as an adjacency list\nif LOCAL_FILE:\n input_file_path = Path(__file__).parent.resolve() / \"task_1967_input.txt\"\n with open(input_file_path, 'r') as file:\n n, m = list(map(int, file.readline().split())) # the number of nodes and the number of roads, respectively\n # collect the information about the roads\n graph = {k: list() for k in range(n)}\n for _ in range(m):\n n_from, n_to, t, w = list(map(int, file.readline().split()))\n n_from, n_to = n_from - 1, n_to - 1\n # where\n # n_from, n_to - the nodes connected by the road\n # t - the time to traverse the road segment (in minutes)\n # w - the maximum truck weight (in grams)\n graph[n_from].append((n_to, t, w))\n graph[n_to].append((n_from, t, w))\nelse:\n n, m = list(map(int, input().split())) # the number of nodes and the number of roads, respectively\n # collect the information about the roads\n graph = {k: list() for k in range(n)}\n for _ in range(m):\n n_from, n_to, t, w = list(map(int, input().split()))\n n_from, n_to = n_from - 1, n_to - 1\n # where\n # n_from, n_to - the nodes connected by the road\n # t - the time to traverse the road segment (in minutes)\n # w - the maximum truck weight (in grams)\n graph[n_from].append((n_to, t, w))\n graph[n_to].append((n_from, t, w))\n\n# Dijkstra's algorithm will find all the cities that can be reached within the time \n","repo_name":"JosephFrancisTribbiani/ds","sub_path":"Informatics/Cources/Data_structures_and_algorithms/Algorithms_on_graphs/topic_4/task_1967_transportation.py","file_name":"task_1967_transportation.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
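The transportation script above stops right at the comment announcing Dijkstra's algorithm. One way it could continue, sketched under the same graph layout (`graph[u]` holds `(v, t, w)` tuples) and with names of my own choosing: a weight-filtered Dijkstra that reports which nodes are reachable within `MAX_TIME` minutes.

```python
import heapq

def reachable_within(graph, start, max_time, truck_total_weight):
    # Classic Dijkstra, but an edge is usable only if its weight limit w
    # admits the loaded truck; distances beyond max_time are discarded.
    dist = {start: 0}
    heap = [(0, start)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float("inf")):
            continue  # stale heap entry
        for v, t, w in graph[u]:
            if w < truck_total_weight:
                continue  # this road cannot carry the loaded truck
            nd = d + t
            if nd <= max_time and nd < dist.get(v, float("inf")):
                dist[v] = nd
                heapq.heappush(heap, (nd, v))
    return set(dist)
```

The full task presumably wraps this in a search over how many cups to load per trip (each cup adds CUP_WEIGHT grams on top of TRUCK_WEIGHT), but that part is not in the file.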
{"seq_id":"74795209225","text":"from math import sqrt\n\n\nclass Solution:\n def winnerSquareGame(self, n: int) -> bool:\n '''\n dp[i] is True or False => it says whether the player whose turn it is wins,\n because the other player will also choose optimally.\n \n Since Alex always moves first, look at dp[i - square] for every square number:\n if any of them is False, it means that after Alex takes that square number\n Bob is guaranteed to lose, so this cell becomes True; otherwise it is False.\n The table of square numbers can be built along the way.\n '''\n \n def isSqrt(num):\n tmp = int(sqrt(num))\n return tmp == sqrt(num)\n \n square = []\n dp = [False] * (n+1)\n \n for i in range(1, n+1):\n if isSqrt(i):\n square.append(i)\n \n for s in square[::-1]:\n if dp[i-s] == False:\n dp[i] = True\n break\n return dp[-1]\n","repo_name":"novayo/LeetCode","sub_path":"1510_Stone_Game_IV/try_1.py","file_name":"try_1.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"35826465816","text":"\nfrom pathlib import Path\nBASE_DIR = Path('./pybert')\nnow_path = Path.cwd()\nconfig = {\n 'raw_data_path': now_path.absolute() / 'dataset/train_sample.csv',\n 'test_path': now_path.absolute() / 'dataset/test.csv',\n\n 'data_dir': BASE_DIR / 'dataset',\n 'log_dir': BASE_DIR / 'output/log',\n 'writer_dir': BASE_DIR / \"output/TSboard\",\n 'figure_dir': BASE_DIR / \"output/figure\",\n 'checkpoint_dir': BASE_DIR / \"output/checkpoints\",\n 'cache_dir': BASE_DIR / 'model/',\n 'result': BASE_DIR / \"output/result\",\n 'test_output':BASE_DIR.absolute().parent/'test_output',\n\n 'bert_vocab_path': now_path.absolute()/'pretrained_model/Bert-wwm-ext/vocab.txt',\n 'bert_config_file': now_path.absolute()/'pretrained_model/Bert-wwm-ext/config.json',\n 'bert_model_dir': now_path.absolute()/'pretrained_model/Bert-wwm-ext/'\n}\n\nimport os\nif not os.path.exists(config['log_dir']):\n os.mkdir(config['log_dir'])\nif not os.path.exists(config['checkpoint_dir']):\n os.mkdir(config['checkpoint_dir'])\nif not os.path.exists(config['test_output']):\n os.mkdir(config['test_output'])\n","repo_name":"shenzaimin/CCKS-2021-Huawei-Event-Extraction","sub_path":"CCKS-Cls/pybert/configs/basic_config.py","file_name":"basic_config.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"}
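The config module above guards each `os.mkdir` with an existence check, which still fails when the parent `output/` directory is missing. If the intent is simply "make sure these directories exist", `os.makedirs` with `exist_ok=True` covers both cases; a sketch against the same `config` dict:

```python
import os

for key in ('log_dir', 'checkpoint_dir', 'test_output'):
    # Creates intermediate directories and is a no-op if the target exists,
    # so neither the exists-check nor a missing parent can trip it up.
    os.makedirs(config[key], exist_ok=True)
```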
{"seq_id":"20627581224","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport time\nimport json\n\nYUZDE_SIFIR = 'https://dogrula.org/wp-content/uploads/2018/10/yüzde-sıfır-1024x388.png'\nYUZDE_YUZ = 'https://dogrula.org/wp-content/uploads/2018/12/yüzde-100.png'\n\n\ndef init_driver(headless=True, disable_gpu=False, enable_adblock=True, timeout=120, maximize=False):\n\toptions = Options()\n\toptions.add_argument('--headless') if headless else 1 == 1\n\toptions.add_argument('--disable-gpu') if disable_gpu else 1 == 1\n\t#options.add_extension('./adblock.crx') if enable_adblock else 1 == 1\n\n\tbrowser = webdriver.Chrome(chrome_options=options)\n\tbrowser.set_page_load_timeout(timeout) # ----------- Throws timeout exception after 300 sec.\n\tbrowser.maximize_window() if maximize else 1 == 1\n\n\treturn browser\n\n\ndef wait_until_find_xpaths(xpath, pathRoot):\n\tt = True\n\tfailCount = 0\n\tmaxFail = 2\n\n\twhile t and failCount < maxFail:\n\t\ttry:\n\t\t\telements = pathRoot.find_elements_by_xpath(xpath)\n\t\t\tt = False\n\t\texcept:\n\t\t\tt = True\n\t\t\tfailCount = failCount + 1\n\t\t\ttime.sleep(0.1)\n\tif (failCount == maxFail):\n\t\treturn None\n\telse:\n\t\treturn elements\n\n\ndef wait_until_find_xpath(xpath, pathRoot):\n\tt = True\n\tfailCount = 0\n\tmaxFail = 2\n\n\twhile t and failCount < maxFail:\n\t\ttry:\n\t\t\telement = pathRoot.find_element_by_xpath(xpath)\n\t\t\tt = False\n\t\texcept:\n\t\t\tt = True\n\t\t\tfailCount = failCount + 1\n\t\t\ttime.sleep(0.1)\n\tif (failCount == maxFail):\n\t\treturn None\n\telse:\n\t\treturn element\n\n\ndef open_page(browser, URL):\n\tbrowser.get(URL)\n\tbrowser.execute_script(\"return window.stop\")\n\n\ndef open_new_window(url):\n\topeningPageScript = '''window.open(\"{}\");'''.format(url)\n\tbrowser.execute_script(openingPageScript)\n\tbrowser.switch_to_window(browser.window_handles[len(browser.window_handles) - 1])\n\n\ndef close_window():\n\tbrowser.close()\n\tbrowser.switch_to_window(browser.window_handles[len(browser.window_handles) - 1])\n\n\ndef scrolling(browser):\n\tSCROLL_PAUSE_TIME = 0.5\n\tMAX_TRY = 10\n\tcounter = 0\n\tlength = 0\n\tjump_length = 1000\n\n\tnew_height = 0\n\twhile True:\n\t\tlast_height = new_height\n\t\tbrowser.execute_script(\"window.scrollTo({}, {});\".format(length, length + jump_length))\n\t\tlength += jump_length\n\t\ttime.sleep(0.5)\n\t\tnew_height = browser.execute_script(\"return document.body.scrollHeight\")\n\t\tprint(\"last height: {}, new_height: {}\".format(last_height, new_height))\n\t\tif last_height == new_height:\n\t\t\tprint(counter)\n\t\t\tcounter += 1\n\t\t\tif counter == MAX_TRY:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tcounter = 0\n\n\nJSON_PATH = 'output/dogrulukpayi-com.json'\nimport os\ntry:\n\tos.remove(JSON_PATH)\nexcept:\n\tpass\n\ncategories = ['sanayi-ticaret-ve-finans', 'savunma-ve-guvenlik', 'ekonomi', 'egitim', 'enerji', 'mali-politikalar', 'uluslararasi-iliskiler', 'adalet', 'sosyal-politikalar', \n\t'bayindirlik-imar-ve-sehircilik', 'teknoloji', 'genel-politika', 'ic-politika', 'cevre', 'tarim-orman-ve-koyisleri']\nif __name__ == '__main__':\n\twith open(JSON_PATH, 'a') as f:\n\t\tf.write(\"[\\n\")\n\n\tMAIN_URL = 'https://www.dogrulukpayi.com/kategori/'\n\tbrowser = init_driver(headless=True)\n\n\tfor category in categories:\n\t\turl = MAIN_URL+category\n\t\topen_page(browser, url)\n\t\tprint(url)\n\t\tscrolling(browser)\n\t\tfor article in wait_until_find_xpaths(\"//div[contains(@class, 'card-content') and contains(@class, 'col-lg-9')]\", browser):\n\t\t\tif wait_until_find_xpath(\"./a/div/span\", article) != None: # Check if its claim check.\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tcontinue\n\n\t\t\tclaim_blob = {}\n\t\t\tclaim_blob['date'] = wait_until_find_xpath(\".//subtitle\", article).text\n\t\t\topen_new_window(wait_until_find_xpath(\"./a\", article).get_attribute('href'))\n\t\t\tclaim_blob['claim'] = wait_until_find_xpath(\"//div[contains(@class, 'hero-card') and contains(@class, 'statement')]/div/h2\", browser).text\n\t\t\tclaim_blob['claim'] += \" \"\n\t\t\tclaim_blob['claim'] += wait_until_find_xpath(\"//div[contains(@class, col-lg-8) and contains(@class, 'col-sm-12') and contains(@class, 'bodycopy')]/h2\", browser).text\n\t\t\t# claim_blob['date'] = wait_until_find_xpath(\"//*[contains(@class, 'entry-date') and contains(@class, 'updated') and contains(@class, 'td-module-date')]\", browser).get_attribute('datetime')\n\t\t\tclose_window()\n\n\t\t\twith open(JSON_PATH, 'a') as f:\n\t\t\t\tf.write(json.dumps(claim_blob))\n\t\t\t\tf.write(',\\n')\n\n\twith open(JSON_PATH, 'a') as f:\n\t\tf.write(\"]\")\n\n\n\t\t","repo_name":"doruksahin/KontroleDegerMi","sub_path":"py-work/crawl/dogrulukpayicom.py","file_name":"dogrulukpayicom.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
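A caveat on the crawler's output format: it writes `[`, then each claim followed by `,\n`, then `]`, which leaves a trailing comma that `json.load()` rejects. A sketch of a variant that accumulates claims and dumps once at the end (same `JSON_PATH`; the record shown is a placeholder):

```python
import json

claims = []
# inside the scraping loop, instead of writing incrementally:
claims.append({"date": "12 Ekim 2018", "claim": "example claim text"})

with open(JSON_PATH, 'w') as f:
    # A single dump produces valid JSON with no trailing comma.
    json.dump(claims, f, ensure_ascii=False, indent=2)
```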
{"seq_id":"1375721892","text":"import os\nimport glob\nimport open3d as o3d\nimport json\nimport numpy as np\nfrom omniobject3d import align, utils\n\ndir_path=r'./OpenXD-OmniObject3D-New/raw/decimated'\n\npoints_index=[0,1,7,2,3,6,4,5]\n\nface=[[0,2,7,1],[0,3,5,2],[3,0,1,6],[4,5,3,6],[4,6,1,7],[4,7,2,5]]\n\ndef drawbox(points):\n lines = [[0,1],[1,2],[2,3],[3,0],\n [4,5],[5,6],[6,7],[7,4],\n # [0,4],[1,5],[2,6],[3,7]\n ]\n\n def diff(a, b, len=1):\n if abs(a - b)<len/2:\n return abs(a-b)\n return len-abs(a-b)\n\n # color each line by its position in the list (a simple hue gradient)\n\n colors = [[max(0,1-diff(i/len(lines),0)*3), max(0,1-diff(i/len(lines),1/3)*3), max(0,1-diff(i/len(lines),2/3)*3)] for i in range(len(lines))]\n print(colors)\n\n line_set = o3d.geometry.LineSet()\n line_set.points = o3d.utility.Vector3dVector(points)\n line_set.lines = o3d.utility.Vector2iVector(lines)\n line_set.colors = o3d.utility.Vector3dVector(colors)\n return line_set\n\n\ndef preview(path):\n with open(os.path.join(path,'Scan.json'),'r') as f:\n data = json.load(f)['data']\n for piece in data:\n coord_mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()\n mesh = o3d.io.read_triangle_mesh(os.path.join(path,'Scan.obj'))\n mat = piece['matrix']\n mesh.transform(mat)\n bbox = align.getBboxOfAlignedY(mesh)\n points = bbox.get_box_points()\n points = [points[x] for x in points_index]\n box_shape=drawbox(points)\n\n coord_mesh.translate(points[0])\n o3d.visualization.draw_geometries([box_shape,mesh,coord_mesh])\n\ndef process(path):\n with open(os.path.join(path,'Scan.json'),'r') as f:\n data = json.load(f)['data']\n out_data=[]\n for piece in data:\n out_data_piece={}\n mesh = o3d.io.read_triangle_mesh(os.path.join(path,'Scan.obj'))\n mat = piece['matrix']\n mesh.transform(mat)\n rot = align.getOrientationOfAlignedY(mesh)\n rot = utils.getAffineMat(rot)\n mesh.transform(rot)\n mat = rot@mat\n bbox = mesh.get_axis_aligned_bounding_box()\n min = bbox.get_min_bound()\n max = bbox.get_max_bound()\n out_data_piece['matrix']=mat\n out_data_piece['extent']={'min':min,'max':max}\n out_data.append(out_data_piece)\n np.save(os.path.join(path,'align.npy'),out_data)\n\n\npaths = glob.glob(os.path.join(dir_path,'*','*','Scan'))\nfor p in paths:\n print(os.path.abspath(p))\n try:\n process(p)\n except Exception as e:\n with open(os.path.join(p,'error_align.log'),'w') as f:\n f.write(str(e))","repo_name":"FigoHunter/omniobject3d","sub_path":"scripts/align_box.py","file_name":"align_box.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"9218484229","text":"from enum import Enum\n\n\nclass StrategyType(Enum):\n MU_PLUS = 0\n MU_COMMA = 1\n DEFAULT = MU_PLUS\n\n @staticmethod\n def make(type_name: str = None) -> 'StrategyType':\n if type_name:\n if type_name.upper() == StrategyType.MU_PLUS.name:\n return StrategyType.MU_PLUS\n elif type_name.upper() == StrategyType.MU_COMMA.name:\n return StrategyType.MU_COMMA\n return StrategyType.DEFAULT\n\n\nclass CrossoverType(Enum):\n ONE_POINT = 1\n UNIFORM = 2\n DEFAULT = UNIFORM\n\n @staticmethod\n def make(type_name: str = None) -> 'CrossoverType':\n if type_name:\n if type_name.upper() == CrossoverType.ONE_POINT.name:\n return CrossoverType.ONE_POINT\n elif type_name.upper() == CrossoverType.UNIFORM.name:\n return CrossoverType.UNIFORM\n return CrossoverType.DEFAULT\n\n\nclass MutationType(Enum):\n INSERTION = 0\n EXCHANGE = 1\n SCRAMBLE = 2\n DEFAULT = INSERTION\n\n @staticmethod\n def make(type_name: str = None) -> 'MutationType':\n if type_name:\n if type_name.upper() == MutationType.INSERTION.name:\n return MutationType.INSERTION\n elif type_name.upper() == MutationType.EXCHANGE.name:\n return MutationType.EXCHANGE\n elif type_name.upper() == MutationType.SCRAMBLE.name:\n return MutationType.SCRAMBLE\n return MutationType.DEFAULT\n","repo_name":"BartlomiejOlber/PSZT-KomiwojazerEwolucyjny","sub_path":"model/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
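The `make()` factories above normalize a config string case-insensitively and silently fall back to `DEFAULT`, so a few illustrative calls behave like this (note `DEFAULT` is an alias member, so identity checks against the aliased value hold):

```python
assert StrategyType.make("mu_comma") is StrategyType.MU_COMMA
assert CrossoverType.make(None) is CrossoverType.UNIFORM       # missing -> DEFAULT alias
assert MutationType.make("Scramble") is MutationType.SCRAMBLE  # case-insensitive
assert MutationType.make("typo") is MutationType.INSERTION     # unknown -> DEFAULT alias
```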
{"seq_id":"29792064216","text":"from discord.channel import VoiceChannel\nfrom discord.ext import commands\nimport asyncio, discord\n\nintents = discord.Intents.all()\nprefix = \"&\"\nclient = commands.Bot(command_prefix=prefix, case_insensitive=True, intents=intents)\nclient.remove_command('help')\nclient.load_extension('cogs.main')\n \n\n@client.event\nasync def on_ready():\n print('Bot is online.')\n await client.change_presence(\n activity=discord.Activity(type=discord.ActivityType.playing, name=\"in the madhouse\"))\n\n@client.command()\nasync def help(ctx):\n commands = '''\n`&help` -> Shows you this.\nDisplays all available commands and other relevant information.\n\n`&level` -> Shows your level.\nYou gain levels by sending messages on the Discord server.\n\n`&giveaway <#channel> <winners> <prize>` -> Starts a giveaway.\n!!Only usable by team members!!.\n'''\n\n helpEmbed = discord.Embed(title='Help', description=f'All available commands:\\n\\n{commands}')\n\n await ctx.send(embed=helpEmbed)\n\n# Run\nclient.run(open('token.txt').readline())","repo_name":"maxdemuke/irrenhausbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
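The bot above reads its token with `open('token.txt').readline()` on its last line. A hedged hardening sketch: prefer an environment variable and strip the trailing newline, falling back to the same file (the variable name is my own):

```python
import os

def load_token(path='token.txt'):
    token = os.environ.get('DISCORD_TOKEN')  # assumed variable name
    if token:
        return token
    with open(path) as f:
        return f.readline().strip()  # strip the trailing newline

# client.run(load_token())  # drop-in replacement for the last line above
```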
{"seq_id":"10718183350","text":"# -*- coding: utf-8 -*-\r\nimport json\r\nimport logging\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\r\nfrom django.shortcuts import render\r\nfrom django.utils.encoding import smart_text\r\n\r\nfrom bviewer.archive.controllers import ZipArchiveController\r\nfrom bviewer.core.controllers import get_gallery, AlbumController\r\nfrom bviewer.core.files.response import download_response\r\nfrom bviewer.core.views import message_view\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nGALLERY_NOT_FOUND = 'No gallery found'\r\nALBUM_NOT_FOUND = 'No album found'\r\nNOT_ALBUM = 'It is not album with images'\r\nNOT_ALLOW_ARCHIVING = 'Archiving is disabled for this album'\r\n\r\n\r\ndef index_view(request, gid):\r\n \"\"\"\r\n Start to archive images, or if done redirect to download\r\n js - wait while done, after redirect to download\r\n \"\"\"\r\n gallery = get_gallery(request)\r\n if not gallery:\r\n return message_view(request, message=GALLERY_NOT_FOUND)\r\n\r\n controller = AlbumController(gallery, request.user, uid=gid)\r\n if not controller.exists():\r\n return message_view(request, message=ALBUM_NOT_FOUND)\r\n\r\n if not controller.is_album():\r\n return message_view(request, message=NOT_ALBUM)\r\n\r\n if not controller.is_archiving_allowed():\r\n return message_view(request, message=NOT_ALLOW_ARCHIVING)\r\n\r\n image_paths = [i.path for i in controller.get_images()]\r\n z = ZipArchiveController(image_paths, gallery)\r\n\r\n # links for redirect to download, and check status\r\n redirect = reverse('archive.download', kwargs=dict(gid=gid, uid=z.uid))\r\n link = reverse('archive.status', kwargs=dict(gid=gid, uid=z.uid))\r\n main = controller.get_object()\r\n\r\n if z.status == 'DONE':\r\n return HttpResponseRedirect(redirect)\r\n\r\n z.add_job()\r\n return render(request, 'archive/download.html', {\r\n 'gallery': gallery,\r\n 'path': request.path,\r\n 'link': link,\r\n 'redirect': redirect,\r\n 'album': main,\r\n 'back': dict(album_id=main.id),\r\n })\r\n\r\n\r\ndef status_view(request, gid, uid):\r\n \"\"\"\r\n Check if archive exists and ready for download\r\n \"\"\"\r\n gallery = get_gallery(request)\r\n if not gallery:\r\n raise Http404(GALLERY_NOT_FOUND)\r\n\r\n controller = AlbumController(gallery, request.user, gid)\r\n if not controller.exists():\r\n return HttpResponse(json.dumps(dict(error=ALBUM_NOT_FOUND)))\r\n\r\n if not controller.is_album():\r\n return HttpResponse(json.dumps(dict(error=NOT_ALBUM)))\r\n\r\n if not controller.is_archiving_allowed():\r\n return HttpResponse(json.dumps(dict(error=NOT_ALLOW_ARCHIVING)))\r\n\r\n image_paths = [i.path for i in controller.get_images()]\r\n z = ZipArchiveController(image_paths, gallery, name=uid)\r\n data = dict(status=z.status, album=gid, uid=uid, progress=z.progress)\r\n\r\n return HttpResponse(json.dumps(data))\r\n\r\n\r\ndef download_view(request, gid, uid):\r\n \"\"\"\r\n Download archive\r\n \"\"\"\r\n gallery = get_gallery(request)\r\n if not gallery:\r\n raise Http404(GALLERY_NOT_FOUND)\r\n\r\n controller = AlbumController(gallery, request.user, uid=gid)\r\n if not controller.exists():\r\n raise Http404(ALBUM_NOT_FOUND)\r\n\r\n if not controller.is_album():\r\n return message_view(request, message=NOT_ALBUM)\r\n\r\n if not controller.is_archiving_allowed():\r\n return message_view(request, message=NOT_ALLOW_ARCHIVING)\r\n\r\n image_paths = [i.path for i in controller.get_images()]\r\n z = ZipArchiveController(image_paths, gallery, name=uid)\r\n\r\n if z.status == 'NONE':\r\n raise Http404('No file found')\r\n\r\n main = controller.get_object()\r\n logger.info(smart_text('download archive \"%s\"'), main.title)\r\n name = smart_text('{0} - {1}.zip').format(main.time.strftime('%Y-%m-%d'), main.title)\r\n return download_response(z.archive, name=name)","repo_name":"b7w/bviewer","sub_path":"bviewer/archive/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"40693692809","text":"import sys, os\nimport sqlite3\nimport itertools\n\nDB = os.environ.get(\"MOBILE_DB\")\n\nif not DB:\n raise Exception(\"MOBILE_DB not set\")\n\ndef connect():\n return sqlite3.connect(DB)\n\ndef stream_raw_readings():\n db = connect()\n sql = \"\"\"\n SELECT stamp, ssid, bssid, strength \n FROM scan\n ORDER BY stamp ASC\n \"\"\"\n c = db.cursor()\n c.execute(sql)\n colnames = [x[0] for x in c.description]\n while True:\n try:\n row = c.fetchone()\n if not row:\n break\n yield dict(zip(colnames, row))\n except:\n break\n \n db.close()\n\n\ndef stream_samples():\n for stamp, readings in itertools.groupby(\n stream_raw_readings(),\n key=lambda x: x[\"stamp\"]):\n readings = [x for x in readings if x[\"bssid\"] and not x[\"strength\"] == 0]\n yield dict(stamp=stamp, readings=readings)\n\n\ndef sample_similarity(x, y):\n # compare the sets of BSSIDs; the reading dicts themselves are not hashable\n r1 = set(r[\"bssid\"] for r in x[\"readings\"])\n r2 = set(r[\"bssid\"] for r in y[\"readings\"])\n return similarity(r1, r2)\n\ndef slide_window(stream, k):\n window = [None for i in range(k)]\n\n for i, x in enumerate(stream):\n window[0:k-1] = window[1:k]\n window[k-1] = x\n if i >= k-1:\n yield list(window)\n\ndef similarity(x, y):\n n1, n2 = len(x), len(y)\n if min(n1, n2) == 0 and n1 + n2 > 0:\n return 0\n if n1 == 0 and n2 == 0:\n return None\n n = len(x.intersection(y))\n return float(n)/max(n1,n2)\n\n","repo_name":"kenpu/mobile-ssid-analysis","sub_path":"src/location/mobile.py","file_name":"mobile.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"70021502666","text":"from django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.template import RequestContext\nfrom django.db import DatabaseError\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom simple_budget.models.transaction.transaction_category import \\\n TransactionCategory\nfrom simple_budget.forms.transaction.add_edit_transaction_category_form import \\\n AddEditTransactionCategoryForm\nfrom simple_budget.forms.transaction.delete_transaction_category_form import \\\n DeleteTransactionCategoryForm\nfrom simple_budget.models.transaction.transaction import Transaction\nfrom simple_budget.models.transaction.transaction_line 
import TransactionLine\nfrom simple_budget.forms.transaction.upload_quicken_file_form import \\\n UploadQuickenFileForm\nfrom simple_budget.forms.transaction.add_edit_transaction_form import \\\n AddEditTransactionForm\nfrom simple_budget.forms.transaction.delete_transaction_form import \\\n DeleteTransactionForm\nfrom simple_budget.models.transaction.qif_parser import QIFParser\nfrom simple_budget.helper.date_calculation import DateCalculation\nfrom simple_budget.helper.helper import clean_message_from_url\nfrom django.conf import settings\nimport json\nimport re\n\n\n@login_required\ndef transactions(request):\n \"\"\"\n display transaction log\n \"\"\"\n prev_month, next_month, start_date, end_date, today = \\\n DateCalculation.calculate_dates(request.GET.get('date', None))\n\n sort, monthly_transactions = \\\n TransactionLine.transaction_lines(start_date, end_date,\n request.GET.get('sort', None))\n\n return render_to_response('transaction/transactions.html',\n {'date': today,\n 'sort': sort,\n 'next_month': next_month,\n 'prev_month': prev_month,\n 'transactions': monthly_transactions},\n context_instance=RequestContext(request))\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser,\n login_url='/?message=no_permissions_error',\n redirect_field_name=None)\ndef add_edit_transaction(request, action, transaction_line_id):\n \"\"\"\n add/edit a transaction\n :param request:\n :return:\n \"\"\"\n if action == 'edit':\n message = 'transaction_edit'\n transaction_line = get_object_or_404(TransactionLine,\n pk=transaction_line_id)\n else:\n message = 'transaction_add'\n\n if request.method == 'POST':\n if request.POST.get('submit', None) != 'Submit':\n return HttpResponseRedirect(request.POST.get('referer', '/'))\n\n form = AddEditTransactionForm(request.POST)\n\n if form.is_valid():\n referer = request.POST.get('referer', '/transactions/?date=')\n\n if re.search('\\?', referer):\n sep = '&'\n else:\n sep = '?'\n\n try:\n Transaction().add_edit_transaction(action, form.cleaned_data)\n return HttpResponseRedirect(\n '%s%smessage=%s_success' % (referer, sep, message,))\n except DatabaseError:\n return HttpResponseRedirect(\n '%s%smessage=%s_failure' % (referer, sep, message,))\n\n else:\n referer = \\\n clean_message_from_url(request.META.get('HTTP_REFERER', None))\n\n if action == 'edit':\n form = AddEditTransactionForm(\n initial={'referer':\n referer,\n 'transaction_line_id':\n transaction_line.pk,\n 'account_id':\n transaction_line.transaction.account_id,\n 'transaction_category_id':\n transaction_line.transaction_category_id,\n 'transaction_date':\n transaction_line.transaction.transaction_date,\n 'amount':\n transaction_line.amount})\n else:\n form = AddEditTransactionForm(\n initial={'referer': referer})\n\n return render_to_response('transaction/add_edit_transaction.html',\n {'form': form,\n 'action': action},\n context_instance=RequestContext(request))\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser,\n login_url='/?message=no_permissions_error',\n redirect_field_name=None)\ndef delete_transaction(request, transaction_line_id):\n \"\"\"\n deletes the supplied transaction\n :param request:\n :return:\n \"\"\"\n transaction_line = get_object_or_404(TransactionLine,\n pk=transaction_line_id)\n referer = \\\n clean_message_from_url(request.META.get('HTTP_REFERER', None))\n\n if request.method == 'POST':\n if request.POST.get('submit', None) == 'Cancel':\n return HttpResponseRedirect(request.POST.get('referer', '/'))\n\n form = DeleteTransactionForm(request.POST)\n\n 
if form.is_valid():\n try:\n Transaction().delete_transaction(form.cleaned_data)\n return HttpResponseRedirect(\n '/transactions/?message=transaction_delete_success')\n except DatabaseError:\n return HttpResponseRedirect(\n '/transactions/?message=transaction_delete_failure')\n\n\n form = DeleteTransactionForm(\n initial={'transaction_line_id': transaction_line.pk,\n 'referer': referer})\n\n return render_to_response('transaction/delete_transaction.html',\n {'form': form,\n 'refer': referer},\n context_instance=RequestContext(request))\n\n@login_required\ndef category(request):\n \"\"\"\n displays transaction category --> budget category mapping\n \"\"\"\n budget_category_id = request.GET.get('bc', None)\n sort, transaction_categories = \\\n TransactionCategory.transaction_category_mapping(\n request.GET.get('sort', None), budget_category_id)\n\n return render_to_response('transaction/category.html',\n {'sort': sort,\n 'budget_category_id': budget_category_id,\n 'transaction_categories': transaction_categories},\n context_instance=RequestContext(request))\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser,\n login_url='/?message=no_permissions_error',\n redirect_field_name=None)\ndef add_edit_transaction_category(request, action,\n transaction_category_id=None):\n \"\"\"\n adds/edits budget category\n :param request:\n :return:\n \"\"\"\n referer = \\\n clean_message_from_url(request.META.get('HTTP_REFERER', None))\n\n if transaction_category_id:\n transaction_category_has_children = \\\n bool(TransactionCategory.objects.filter(\n transaction_category_parent_id=transaction_category_id))\n else:\n transaction_category_has_children = None\n\n if request.method == 'POST':\n if request.POST.get('submit', None) == 'Cancel':\n return HttpResponseRedirect(request.POST.get('referer', '/'))\n\n form = AddEditTransactionCategoryForm(request.POST)\n if form.is_valid():\n try:\n if form.cleaned_data['transaction_category_id']:\n transaction_category = \\\n TransactionCategory(\n transaction_category_id=\n form.cleaned_data['transaction_category_id'],\n transaction_category_parent_id=\n form.cleaned_data['transaction_category_parent_id'],\n budget_category_id=\n form.cleaned_data['budget_category'],\n transaction_category=\n form.cleaned_data['transaction_category'])\n else:\n transaction_category = \\\n TransactionCategory(\n transaction_category_parent_id=\n form.cleaned_data['transaction_category_parent_id'],\n budget_category_id=\n form.cleaned_data['budget_category'],\n transaction_category=\n form.cleaned_data['transaction_category'])\n\n transaction_category.save()\n\n if transaction_category.pk:\n message = 'success'\n else:\n message = 'failure'\n except DatabaseError:\n message = 'failure'\n\n return HttpResponseRedirect('/transaction/category/?'\n 'message=transaction_category_%s_%s'\n % (action, message,))\n\n else:\n if action == 'edit' and transaction_category_id:\n transaction_category = get_object_or_404(TransactionCategory,\n pk=transaction_category_id)\n form = AddEditTransactionCategoryForm(\n initial={'referer': referer,\n 'transaction_category_id':\n transaction_category.transaction_category_id,\n 'transaction_category_parent_id':\n transaction_category.transaction_category_parent_id,\n 'transaction_category':\n transaction_category.transaction_category,\n 'budget_category':\n transaction_category.budget_category_id})\n\n else:\n form = AddEditTransactionCategoryForm(initial={'referer': referer})\n\n return 
render_to_response('transaction/add_edit_transaction_category.html',\n {'form': form,\n 'action': action,\n 'transaction_category_has_children':\n transaction_category_has_children},\n context_instance=RequestContext(request))\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser,\n login_url='/?message=no_permissions_error',\n redirect_field_name=None)\ndef delete_transaction_category(request, transaction_category_id):\n \"\"\"\n deletes the supplied transaction category\n :param request:\n :return:\n \"\"\"\n transaction_category = get_object_or_404(TransactionCategory,\n pk=transaction_category_id)\n referer = \\\n clean_message_from_url(request.META.get('HTTP_REFERER', None))\n\n transaction_lines = \\\n TransactionLine.objects.filter(transaction_category_id=\n transaction_category_id).count()\n\n transaction_category_children = \\\n bool(TransactionCategory.objects.filter(\n transaction_category_parent_id=transaction_category_id).count())\n\n if request.method == 'POST':\n if (request.POST.get('submit', None) == 'Cancel' or\n transaction_category_children):\n return HttpResponseRedirect(request.POST.get('referer', '/'))\n\n form = \\\n DeleteTransactionCategoryForm(\n request.POST,\n current_tc_id=transaction_category_id,\n select_new_category=bool(transaction_lines))\n\n if form.is_valid():\n try:\n if form.cleaned_data['transfer_transaction_category_id']:\n TransactionLine.objects.filter(transaction_category_id=\n transaction_category_id).\\\n update(transaction_category_id=\n form.cleaned_data['transfer_transaction_category_id'])\n\n transaction_category.delete()\n\n return HttpResponseRedirect(\n '/transaction/category/?'\n 'message=transaction_category_delete_success')\n except DatabaseError:\n return HttpResponseRedirect(\n '/transaction/category/?'\n 'message=transaction_category_delete_failure')\n else:\n form = DeleteTransactionCategoryForm(\n current_tc_id=transaction_category_id,\n initial={'transaction_category_id': transaction_category.pk,\n 'referer': referer})\n\n return render_to_response('transaction/delete_transaction_category.html',\n {'form': form,\n 'transaction_lines': transaction_lines,\n 'transaction_category_children':\n transaction_category_children,\n 'refer': referer},\n context_instance=RequestContext(request))\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser,\n login_url='/?message=no_permissions_error',\n redirect_field_name=None)\ndef upload_quicken_file(request):\n \"\"\"\n processes an uploaded quicken file\n \"\"\"\n if not settings.QUICKEN_IMPORT_ACTIVE:\n return HttpResponseRedirect('/')\n\n if request.method == 'POST':\n if request.POST.get('submit', None) == 'Cancel':\n return HttpResponseRedirect(request.POST.get('referer', '/'))\n\n form = UploadQuickenFileForm(request.POST, request.FILES)\n if form.is_valid():\n if Transaction.process_upload_quicken_file(request.FILES['file']):\n return HttpResponseRedirect('/budget/?message=upload_success')\n else:\n return HttpResponseRedirect('/budget/?message=upload_failure')\n\n else:\n referer = \\\n clean_message_from_url(request.META.get('HTTP_REFERER', None))\n form = UploadQuickenFileForm(initial={'referer': referer})\n\n return render_to_response('transaction/upload_quicken_file.html',\n {'form': form},\n context_instance=RequestContext(request))\n\n@login_required\ndef upload_quicken_file_status(request):\n \"\"\"\n gets the status for the last uploaded qif file\n :return:\n \"\"\"\n return HttpResponse(json.dumps({'status': QIFParser.get_status()}),\n 
content_type='application/json')","repo_name":"buzz1274/simple_budget","sub_path":"simple_budget/views/transaction/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":14877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"5030202740","text":"import datetime as dt\nimport os\nimport math\nimport numpy as np\nimport pandas as pd\nfrom util import get_data, plot_data\n\ndef compute_portvals(\n trades_df,\n start_val=1000000,\n commission=9.95,\n impact=0.005,\n):\n trades_df = trades_df.sort_index()\n sd = trades_df.index[0] # get start date\n ed = trades_df.index[-1] # get end date\n dates = pd.date_range(sd, ed)\n sym = trades_df.columns[0]\n prices = get_data([sym], dates)\n prices = prices.drop(['SPY'], axis=1)\n prices['CASH'] = np.ones(prices.shape[0])\n day_trades = prices.copy()\n day_trades.iloc[:] = 0\n day_trades['CASH'] = np.zeros(prices.shape[0])\n trade_rows = trades_df.iterrows()\n\n for index, row in trade_rows:\n if row[sym] > 0:\n day_trades.at[index, sym] += row[sym]\n sym_price = prices.at[index, sym]\n buy_price = row[sym] * sym_price * (1 + impact) + commission\n day_trades.at[index, 'CASH'] -= buy_price\n elif row[sym] < 0:\n day_trades.at[index, sym] += row[sym]\n sym_price = prices.at[index, sym]\n sell_price = -row[sym] * sym_price * (1 - impact) - commission\n day_trades.at[index, 'CASH'] += sell_price\n elif row[sym] == 0:\n continue\n\n holdings = day_trades.copy()\n holdings.iloc[0]['CASH'] += start_val\n for i in range(1, len(holdings)):\n holdings.iloc[i] = holdings.iloc[i - 1] + holdings.iloc[i]\n holdings = prices * holdings\n holdings['Values'] = holdings.sum(axis=1)\n portvals = holdings['Values']\n return portvals\n\ndef author():\n return \"vpatel436\"\n\nif __name__ == \"__main__\":\n print('test')","repo_name":"Vnp112/Strategy-Evaluation","sub_path":"marketsimcode.py","file_name":"marketsimcode.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
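A small usage sketch for `compute_portvals` above. It assumes the course's `util.get_data` helper can serve prices for the chosen symbol and dates, so the symbol and dates here are illustrative only:

```python
import datetime as dt
import pandas as pd

# One column per symbol; positive = buy shares, negative = sell.
trades = pd.DataFrame(
    {"JPM": [1000, 0, -1000]},
    index=pd.DatetimeIndex([dt.datetime(2010, 1, 4),
                            dt.datetime(2010, 1, 5),
                            dt.datetime(2010, 1, 6)]),
)
portvals = compute_portvals(trades, start_val=100000,
                            commission=9.95, impact=0.005)
print(portvals.iloc[-1])  # ending portfolio value
```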
{"seq_id":"37807406107","text":"'''\r\nThis module contains code that searches the database in an attempt to \r\nautomatically find a good match for a given ComicBook object. \r\n\r\n@author: Cory Banack\r\n'''\r\nfrom dbmodels import IssueRef\r\nimport db\r\nimport dbutils\r\nfrom matchscore import MatchScore\r\nimport imagehash\r\nimport utils\r\n\r\n# when comparing two comic covers, they must be this similar or greater\r\n# (when using imagehash.similarity()) to be considered \"the same\"\r\n__MATCH_THRESHOLD = 0.87\r\n\r\n#==============================================================================\r\ndef find_series_ref(book, config):\r\n ''' \r\n Performs a number of queries on the database, in an attempt to find a \r\n SeriesRef object that strongly matches the given book. A variety of \r\n techniques are employed, including checking for matching issue numbers in\r\n the prospective series, and image matching the cover of the prospective \r\n issue in the prospective series. The user's search and filtering \r\n preferences (in 'config') are also taken into account.\r\n \r\n Returns None if no clear series identification could be made. \r\n '''\r\n \r\n # tests to see if two hashes are close enough to be considered \"the same\"\r\n def are_the_same(hash1, hash2):\r\n x = imagehash.similarity(hash1, hash2)\r\n return x > __MATCH_THRESHOLD\r\n \r\n \r\n retval = None\r\n series_ref = __find_best_series(book, config)\r\n if series_ref:\r\n matches = False\r\n hash_local = __get_local_hash(book)\r\n if hash_local:\r\n # 1. convert SeriesRef + issue num to an IssueRef iff it's possible.\r\n ref = db.query_issue_ref(series_ref, book.issue_num_s) \\\r\n if book.issue_num_s else series_ref\r\n ref = series_ref if not ref else ref\r\n\r\n # 2. see if the local and remote hashes match up\r\n hash_remote = __get_remote_hash(ref)\r\n matches = are_the_same(hash_local, hash_remote)\r\n \r\n # 3. if the given ref is an IssueRef, we can try to load the issue's\r\n # additional cover images and see if any of them match, too.\r\n if not matches and type(ref) == IssueRef: \r\n issue = db.query_issue(ref, True)\r\n if issue:\r\n for ref in issue.image_urls_sl:\r\n hash_remote = __get_remote_hash(ref)\r\n matches = are_the_same(hash_local, hash_remote)\r\n if matches: break\r\n retval = series_ref if matches else None\r\n \r\n return retval\r\n\r\n#==============================================================================\r\ndef __find_best_series(book, config): \r\n ''' \r\n Queries the database to find a best guess for a series matching the given\r\n ComicBook, based on its name, year, issue number, and other text attributes.\r\n \r\n Returns SeriesRef if a reasonable guess was found, or None if one wasn't.\r\n '''\r\n \r\n # 1. obtain SeriesRefs for this book, removing some as dictated by prefs\r\n series_refs = db.query_series_refs( book.series_s, \r\n config.ignored_searchterms_sl )\r\n series_refs = dbutils.filter_series_refs( \r\n series_refs,\r\n config.ignored_publishers_sl, \r\n config.ignored_before_year_n,\r\n config.ignored_after_year_n,\r\n config.never_ignore_threshold_n)\r\n\r\n # 2. obtain the first, second, and third best matching SeriesRefs for the\r\n # given book, if there are any.\r\n primary = None\r\n secondary = None \r\n tertiary = None \r\n if len(series_refs) > 0:\r\n mscore = MatchScore()\r\n def find_best_score( refs ):\r\n return reduce( lambda x,y: x if mscore.compute_n(book, x) \r\n >= mscore.compute_n(book,y) else y, refs) if refs else None\r\n primary = find_best_score(series_refs)\r\n if primary:\r\n series_refs.remove(primary)\r\n secondary = find_best_score(series_refs)\r\n if secondary:\r\n series_refs.remove(secondary)\r\n tertiary = find_best_score(series_refs)\r\n \r\n # 3. 
if our book is the first (or unknown) issue, figure out if the best \r\n # matching series has a similar cover to the second or third best.\r\n # if it does, we're probably dealing with a trade paperback and a \r\n # regular issue, and we can't find the best series reliably, so we bail\r\n is_first_issue = (lambda i : not i or \\\r\n (utils.is_number(i) and float(i)==1.0))(book.issue_num_s)\r\n if is_first_issue and primary and secondary:\r\n too_similar = False\r\n SIMILARITY_THRESHOLD = __MATCH_THRESHOLD - 0.10\r\n hash1 = __get_remote_hash(primary)\r\n hash2 = __get_remote_hash(secondary)\r\n if imagehash.similarity(hash1, hash2) > SIMILARITY_THRESHOLD:\r\n too_similar = True\r\n elif tertiary:\r\n hash3 = __get_remote_hash(tertiary)\r\n if imagehash.similarity(hash1, hash3) > SIMILARITY_THRESHOLD:\r\n too_similar = True\r\n primary = None if too_similar else primary\r\n \r\n return primary\r\n \r\n\r\n#==============================================================================\r\ndef __get_local_hash(book):\r\n ''' \r\n Gets the image hash for the cover of the give ComicBook object. Returns\r\n None if the cover image was empty or couldn't be hashed for any reason.\r\n ''' \r\n hash = None # matches nothing\r\n try:\r\n image = book.create_image_of_page(0) if book else None;\r\n if image:\r\n image = utils.strip_back_cover(image)\r\n hash = imagehash.hash(image)\r\n finally:\r\n if \"image\" in locals() and image: image.Dispose()\r\n return hash \r\n\r\n\r\n#==============================================================================\r\ndef __get_remote_hash(ref):\r\n ''' \r\n Gets the image hash for a remote comic book resource. This resource\r\n can be a SeriesRef (hashes series art), an IssueRef (hashes the \r\n first issue cover) or a URL to an image on the web.\r\n \r\n Returns None if the ref led to an image that was empty or \r\n couldn't be hashed for any reason.\r\n ''' \r\n hash = None # matches nothing\r\n try:\r\n image = db.query_image(ref) if ref else None\r\n if image:\r\n image = utils.strip_back_cover(image)\r\n hash = imagehash.hash(image)\r\n finally:\r\n if \"image\" in locals() and image: image.Dispose()\r\n return hash \r\n","repo_name":"cbanack/comic-vine-scraper","sub_path":"src/py/utils/automatcher.py","file_name":"automatcher.py","file_ext":"py","file_size_in_byte":6320,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"81"} +{"seq_id":"25933549653","text":"\"\"\"Benchmark.\"\"\"\n\nimport argparse\nfrom collections import namedtuple\nimport datetime\nimport logging\nimport os\n\nimport openpifpaf.benchmark\n\nLOG = logging.getLogger(__name__)\n\n\nDEFAULT_CHECKPOINTS = [\n 'tshufflenetv2k16',\n]\n\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\ndef cli():\n parser = argparse.ArgumentParser(\n prog='python3 -m openpifpaf.benchmark',\n description=__doc__,\n formatter_class=CustomFormatter,\n )\n parser.add_argument('--version', action='version',\n version='OpenPifPaf {version}'.format(version=openpifpaf.__version__))\n\n parser.add_argument('--output', default=None,\n help='output file name')\n parser.add_argument('--checkpoints', default=DEFAULT_CHECKPOINTS, nargs='+',\n help='checkpoints to evaluate')\n parser.add_argument('--crowdpose', default=False, action='store_true')\n parser.add_argument('--ablation-1', default=False, action='store_true')\n parser.add_argument('--ablation-2', default=False, action='store_true')\n 
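The benchmark's `cli()` leans on `argparse.parse_known_args` to keep its own flags while passing everything unrecognized through to the evaluator. A minimal illustration of that split, with made-up arguments:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--output', default=None)
known, passthrough = parser.parse_known_args(
    ['--output', 'out/', '--loader-workers=2', '--dataset=posetrack2018'])

print(known.output)   # 'out/'
print(passthrough)    # ['--loader-workers=2', '--dataset=posetrack2018']
# cli() later inspects the pass-through list with startswith() checks
# before appending defaults, exactly as in the code above.
```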
parser.add_argument('--ablation-3', default=False, action='store_true')\n parser.add_argument('--ablation-4', default=False, action='store_true')\n parser.add_argument('--ablation-5', default=False, action='store_true')\n group = parser.add_argument_group('logging')\n group.add_argument('--debug', default=False, action='store_true',\n help='print debug messages')\n args, eval_args = parser.parse_known_args()\n\n logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)\n\n # default eval_args\n if not eval_args:\n eval_args = ['--loader-workers=2']\n\n # default loader workers\n if not any(l.startswith('--loader-workers') for l in eval_args):\n LOG.info('adding \"--loader-workers=2\" to the argument list')\n eval_args.append('--loader-workers=2')\n\n # default dataset\n if not any(l.startswith('--dataset') for l in eval_args):\n if args.crowdpose:\n LOG.info('adding \"--dataset=crowdpose\" to the argument list')\n eval_args.append('--dataset=crowdpose')\n if not any(l.startswith('--force-complete-pose') for l in eval_args):\n LOG.info('adding \"--force-complete-pose\" to the argument list')\n eval_args.append('--force-complete-pose')\n if not any(l.startswith('--seed-threshold') for l in eval_args):\n LOG.info('adding \"--seed-threshold=0.2\" to the argument list')\n eval_args.append('--seed-threshold=0.2')\n if not any(l.startswith('--crowdpose-eval-test') for l in eval_args):\n LOG.info('adding \"--crowdpose-eval-test\" to the argument list')\n eval_args.append('--crowdpose-eval-test')\n if not any(l.startswith('--decoder') for l in eval_args):\n LOG.info('adding \"--decoder=cifcaf:0\" to the argument list')\n eval_args.append('--decoder=cifcaf:0')\n else:\n LOG.info('adding \"--dataset=posetrack2018\" to the argument list')\n eval_args.append('--dataset=posetrack2018')\n if not any(l.startswith('--write-predictions') for l in eval_args):\n LOG.info('adding \"--write-predictions\" to the argument list')\n eval_args.append('--write-predictions')\n if not any(l.startswith('--decoder') for l in eval_args):\n LOG.info('adding \"--decoder=trackingpose:0\" to the argument list')\n eval_args.append('--decoder=trackingpose:0')\n\n # generate a default output filename\n if args.output is None:\n now = datetime.datetime.now().strftime('%y%m%d-%H%M%S')\n args.output = 'outputs/benchmark-{}/'.format(now)\n os.makedirs(args.output)\n\n return args, eval_args\n\n\ndef main():\n args, eval_args = cli()\n Ablation = namedtuple('Ablation', ['suffix', 'args'])\n ablations = [Ablation('', eval_args)]\n\n if args.crowdpose:\n assert all('crowdpose' in c for c in args.checkpoints)\n ablations += [\n Ablation('.easy', eval_args + ['--crowdpose-index=easy']),\n Ablation('.medium', eval_args + ['--crowdpose-index=medium']),\n Ablation('.hard', eval_args + ['--crowdpose-index=hard']),\n ]\n\n if args.ablation_1:\n ablations += [\n Ablation('.greedy', eval_args + ['--greedy']),\n Ablation('.no-reverse', eval_args + ['--no-reverse-match']),\n Ablation('.greedy.no-reverse', eval_args + ['--greedy', '--no-reverse-match']),\n # Ablation('.greedy.dense', eval_args + ['--greedy', '--dense-connections']),\n # Ablation('.dense', eval_args + ['--dense-connections']),\n # Ablation('.dense.hierarchy', eval_args + ['--dense-connections=0.1']),\n ]\n if args.ablation_2:\n ablations += [\n Ablation('.nr.nms', eval_args + ['--ablation-cifseeds-no-rescore',\n '--ablation-cifseeds-nms',\n '--ablation-caf-no-rescore']),\n ]\n if args.ablation_3:\n eval_args_decabl = [\n arg\n for arg in eval_args\n if not 
arg.startswith(('--instance-threshold=', '--decoder='))\n ]\n ablations += [\n Ablation('.euclidean', eval_args_decabl + ['--decoder=posesimilarity:0',\n '--posesimilarity-distance=euclidean']),\n Ablation('.oks', eval_args_decabl + ['--decoder=posesimilarity:0',\n '--posesimilarity-distance=oks']),\n Ablation('.oks-inflate2', eval_args_decabl + ['--decoder=posesimilarity:0',\n '--posesimilarity-distance=oks',\n '--posesimilarity-oks-inflate=2.0']),\n Ablation('.oks-inflate10', eval_args_decabl + ['--decoder=posesimilarity:0',\n '--posesimilarity-distance=oks',\n '--posesimilarity-oks-inflate=10.0']),\n ]\n if args.ablation_4:\n ablations += [\n Ablation('.w513', eval_args + ['--posetrack-eval-long-edge=513']),\n Ablation('.w641', eval_args + ['--posetrack-eval-long-edge=641']),\n Ablation('.w1201', eval_args + ['--posetrack-eval-long-edge=1201']),\n ]\n if args.ablation_5:\n ablations += [\n Ablation('.recovery', eval_args + ['--trackingpose-track-recovery']),\n ]\n\n configs = [\n openpifpaf.benchmark.Config(checkpoint, ablation.suffix, ablation.args)\n for checkpoint in args.checkpoints\n for ablation in ablations\n ]\n openpifpaf.benchmark.Benchmark(\n configs,\n args.output,\n reference_config=configs[0] if len(args.checkpoints) == 1 and not args.crowdpose else None,\n stat_scale=100.0 if args.crowdpose else 1.0,\n ).run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"openpifpaf/openpifpaf","sub_path":"src/openpifpaf/plugins/posetrack/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":7208,"program_lang":"python","lang":"en","doc_type":"code","stars":1098,"dataset":"github-code","pt":"81"}
{"seq_id":"25685009965","text":"# -*- coding: utf-8 -*-\nimport torch\nimport numpy as np \nimport argparse\nimport pickle \nimport os\nfrom os import listdir, getcwd\nimport os.path as osp\nimport glob\nimport torchvision\nfrom torchvision import transforms \nimport torch.backends.cudnn as cudnn\nfrom darknet import Darknet\nfrom PIL import Image\nfrom util import *\nimport cv2\nimport pickle as pkl\nimport random\nfrom preprocess import prep_image\nfrom preprocess import automatic_brightness_and_contrast\nimport natsort\n\nimport resnet50 as model_n \nimport matplotlib.pyplot as plt\nimport sys\n\n#file based demo program\n#function : upper + lower , upper only, lower only\n\ntransform_test = transforms.Compose([\n transforms.Resize(size=(256, 128)),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n ])\n# \"winter scarf\", \"cane\", \"bag\", \"shoes\", \"hat\", \"face\"]\n#attribute categories = #6\ncolors_a = [\"----\", \"white\", \"black\", \"gray\", \"pink\", \"red\", \"green\", \"blue\", \"brown\", \"navy\", \"beige\", \\\n \"yellow\", \"purple\", \"orange\", \"mixed-color\", \"other-color\"]\npattern_a = [\"----\", \"plain\", \"checker\", \"dotted\", \"floral\", \"striped\", \"mixed\", \"stripe-horizon\", \"stripe-vertical\", \\\n \"letter\", \"diamond\", \"character\", \"leopard\", \"lace\", \"others\"]\ngender_a = [\"----\", \"man\", \"woman\"]\nseason_a = [\"----\", \"spring\", \"summer\", \"fall\", \"winter\"]\ntop_type_a = [\"----\", \"shirt\", \"jumper\", \"jacket\", \"vest\", \"parka\", \"coat\", \"dress\", \"sweater\", \"t-shirt\", \"top\", \\\n \"blouse\", \"blazer\", \"cardigan\"]\nsleeves_a = [\"----\", \"short-sleeves\", \"long-sleeves\", \"no-sleeves\"]\ntexture_a = [\"normal\", \"normal\", \"fur\", \"denim\", \"leathers\", \"shiny\", \"wool\", \"knit\"]\n
\"none-button\", \"zipper\", \"button\", \"open\", \"belt\"]\nlength_a = [\"----\", \"short\", \"medium\", \"long\"]\nfit_a = [\"----\", \"normal\", \"slim\", \"loose\"]\ncollar_a = [\"----\", \"none\", \"v-neck\", \"square-neck\", \"round-neck\", \"turtle\", \"v-shape\", \"round-shirt\", \"notched\", \\\n \"off-shoulder\", \"hood\", \"band\"]\n\n #\"winter scarf\", \"cane\", \"bag\", \"shoes\", \"hat\", \"face\"]\n\n#Bottom => 5 attributes are shared by top attributes items: colors, pattern, gender, season, length\n\nbottom_type_a = [\"----\", \"pants\", \"skirt\", \"jeans\", \"tights\", \"hot-pants\", \"suit\", \"capri\", \"leggings\"]\nleg_pose_a = [\"----\", \"standing\", \"sitting\", \"lying\"]\n\n#Acceary -> 4 attributes : color, gender, season shared\nacc_type_a = [\"----\", \"scarf/muffler\", \"cane\", \"bag\", \"shose\", \"hat\", \"sandles\", \"boots\", \"heels\"]\n\n#face attributes : face gender is shared\n\nglasses_a = [\"----\", \"none-glasses\", \"glasses\", \"sun-glasses\"]\n\n# style\nstyle_a = [\"----\", \"none\", \"rocker\", \"casual\", \"comfortable\", \"basic\", \"eclectic\", \"trendy\", \"classic\", \"chic\", \"urban\", \"romantic\", \\\n \"elegant\", \"bohemian\", \"sexy\", \"preppy\", \"denim\", \"military\", \"school\", \"sport\", \"hiking\", \"uniform\", \"suit\"]\n\n\nattribute_pool = [colors_a, pattern_a, gender_a, season_a, top_type_a, sleeves_a, \\\n texture_a, button_a, length_a, fit_a, collar_a , \\\n colors_a, pattern_a, gender_a, season_a, length_a, bottom_type_a, leg_pose_a, \\\n gender_a, glasses_a, style_a]\n\n# Device configuration\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\ndef convert_image_np(inp):\n \"\"\"Convert a Tensor to numpy image.\"\"\"\n # inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\ndef main(args):\n\n #1. Yolov3 : for Human ROI detection\n num_classes = 80\n yolov3 = Darknet(args.cfg_file)\n yolov3.load_weights(args.weights_file)\n yolov3.net_info[\"height\"] = args.reso \n inp_dim = int(yolov3.net_info[\"height\"])\n assert inp_dim % 32 == 0 \n assert inp_dim > 32\n yolov3.to(device)\n yolov3.eval()\n\n print(\"yolo-v3 network successfully loaded\")\n\n attribute_dim = [16, 15, 3, 5, 14, 4, 8, 6, 4, 4, 12, 16, 15, 3, 5, 4, 9, 4, 3, 4, 23] #21개\n\n #2. listing image files from sample directory\n try:\n list_dir = os.listdir(args.test)\n # list_dir.sort(key=lambda f: int(filter(str.isdigit, f)))\n # list_dir.sort(key=lambda x: int(x[:-4]))\n list_dir = natsort.natsorted(list_dir, reverse=False)\n imlist = [osp.join(osp.realpath('.'), args.test, img) for img in list_dir if os.path.splitext(img)[1] =='.jpg' or os.path.splitext(img)[1] == '.jpeg' or os.path.splitext(img)[1] =='.JPG' or os.path.splitext(img)[1] =='.png']\n except NotADirectoryError:\n imlist = []\n imlist.append(osp.join(osp.realpath('.'), args.test))\n print('Not a directory error')\n except FileNotFoundError:\n print (\"No file or directory with the name {}\".format(args.test))\n exit()\n\n #3. 
loading model\n check_point = torch.load('cfg/'+ args.model+ '_best.pth.tar')\n state_dict = check_point['state_dict']\n if args.arch == 'resnet50':\n model = model_n.__dict__['resnet50'](pretrained=True, num_classes=len(attribute_dim), attribute_dim=attribute_dim)\n \n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n model.to(device)\n model.eval()\n print(args.model)\n \n #4. demo routine for each sample file\n with torch.no_grad():\n for inx, image in enumerate(imlist):\n\n print('\\n'+ list_dir[inx])\n orig_pil_image = Image.open(image)\n image, orig_img, im_dim, orig = prep_image(image, inp_dim)\n im_dim = torch.FloatTensor(im_dim).repeat(1, 2)\n \n image_tensor = image.to(device)\n im_dim = im_dim.to(device)\n \n #4-1. detect candidates for human ROI\n detections = yolov3(image_tensor, device, True) # prediction mode for yolo-v3\n detections = write_results(detections, args.confidence, num_classes, device, nms=True, nms_conf=args.nms_thresh)\n # original image dimension --> im_dim\n\n os.system('clear')\n if type(detections) != int: \n if detections.shape[0]:\n \n im_dim = im_dim.repeat(detections.shape[0], 1)\n scaling_factor = torch.min(inp_dim/im_dim, 1)[0].view(-1, 1)\n \n detections[:, [1, 3]] -= (inp_dim - scaling_factor*im_dim[:, 0].view(-1, 1))/2\n detections[:, [2, 4]] -= (inp_dim - scaling_factor*im_dim[:, 1].view(-1, 1))/2\n\n detections[:, 1:5] /= scaling_factor\n\n small_object_ratio = torch.FloatTensor(detections.shape[0])\n\n for i in range(detections.shape[0]):\n detections[i, [1, 3]] = torch.clamp(detections[i, [1, 3]], 0.0, im_dim[i, 0])\n detections[i, [2, 4]] = torch.clamp(detections[i, [2, 4]], 0.0, im_dim[i, 1])\n\n object_area = (detections[i, 3] - detections[i, 1])*(detections[i, 4] - detections[i, 2])\n orig_img_area = im_dim[i, 0]*im_dim[i, 1]\n small_object_ratio[i] = object_area/orig_img_area\n \n #4-2. 
remove small human ROI\n detections = detections[small_object_ratio > 0.05]\n im_dim = im_dim[small_object_ratio > 0.05] \n bboxs = detections[:, 1:5].clone()\n \n if detections.size(0) > 0:\n\n Roi = detections.cpu().numpy().astype(int)\n #4-3 space margin for accessory\n rois = []\n for i in range(detections.shape[0]):\n #roi = orig_img[Roi[i][2]:Roi[i][4], Roi[i][1]:Roi[i][3]]\n roi = orig_pil_image.crop([Roi[i][1], Roi[i][2], Roi[i][3], Roi[i][4]])\n # roi.save(str(i)+ list_dir[inx])\n roi = transform_test(roi).unsqueeze(0)\n rois.append(roi)\n \n rois = torch.cat(rois, 0).cuda()\n outputs = model(rois)\n \n #4.3 ouput multi-attributre results for fahion clothing\n for i in range(detections.shape[0]):\n sampled_caption = []\n dress = False\n sampled_caption.append(' top :')\n\n for j in range(len(outputs)):\n #temp = outputs[j][i].data\n max_index = torch.max(outputs[j][i].data, 0)[1]\n word = attribute_pool[j][max_index]\n\n if j == 1 : # pattern\n sampled_caption.append(word + '-pattern')\n elif j == 6 : # texture\n sampled_caption.append(word + '-texture')\n #elif j == 7 : # button\n # sampled_caption.append(word + '-button')\n elif j == 8 : # length\n sampled_caption.append(word + '-length')\n elif j == 9 : # fit\n sampled_caption.append(word + '-fit')\n elif j == 10 : # collar\n sampled_caption.append(word + '-collar \\n bottom :')\n elif j == 12 : # pattern\n sampled_caption.append(word + '-pattern')\n elif j == 17 : \n sampled_caption.append('\\n face :')\n elif j == 19 : \n sampled_caption.append(word + ' \\n style :')\n else:\n sampled_caption.append(word)\n\n sentence = ' '.join(sampled_caption)\n \n print ('\\n'+ str(i+1) + ') ' + '\\n' + sentence)\n write(Roi[i], orig_img, sentence, i+1, coco_classes, colors)\n \n cv2.imshow(\"frame\", orig_img)\n key = cv2.waitKey(0)\n os.system('clear')\n if key & 0xFF == ord('q'): \n break\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser() \n \n \n parser.add_argument('--model', type=str, default='41k_outer', help='path for trained encoder')\n parser.add_argument('--arch', type=str, default='resnet50', help='arch for main model')\n parser.add_argument('--test', type=str, default='image', help='path for vocabulary wrapper')\n \n # Encoder - Yolo-v3 parameters \n parser.add_argument('--confidence', type=float, default = 0.5, help = 'Object Confidence to filter predictions')\n parser.add_argument('--nms_thresh', type=float , default = 0.4, help = 'NMS Threshhold')\n parser.add_argument('--cfg_file', type = str, default = 'cfg/yolov3.cfg', help ='Config file')\n parser.add_argument('--weights_file', type = str, default = 'cfg/yolov3.weights', help = 'weightsfile')\n parser.add_argument('--reso', type=str, default = '416', help = 'Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed')\n    parser.add_argument('--scales', type=str, default = '1,2,3', help = 'Scales to use for detection')\n\n    args = parser.parse_args()\n    \n    coco_classes = load_classes('cfg/coco.names')\n    colors = pkl.load(open(\"cfg/pallete2\", \"rb\"))\n    \n    main(args)\n    \n\n    \n","repo_name":"chankyupark/fashion_attribute","sub_path":"file_demo_outer.py","file_name":"file_demo_outer.py","file_ext":"py","file_size_in_byte":11985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"10079009936","text":"import sys\n\nmorse = {'A': '.-', 'B': '-...', 'C': '-.-.', \n         'D': '-..', 'E': '.', 'F': '..-.',\n         'G': '--.', 'H': '....', 'I': '..',\n         'J': '.---', 'K': '-.-', 'L': '.-..',\n         'M': '--', 'N': '-.', 'O': '---',\n         'P': '.--.', 'Q': '--.-', 'R': '.-.',\n         'S': '...', 'T': '-', 'U': '..-',\n         'V': '...-', 'W': '.--', 'X': '-..-',\n         'Y': '-.--', 'Z': '--..',\n\n         '0': '-----', '1': '.----', '2': '..---',\n         '3': '...--', '4': '....-', '5': '.....',\n         '6': '-....', '7': '--...', '8': '---..',\n         '9': '----.', '$': ' ', ' ': '$'\n         }\n\ntranslate = []\ntext = []\nt = []\ncond = True\n\nwhile True:\n    try:\n        selection = int(input(\"Enter '1' for Text to Morse, '2' for Morse to Text: \"))\n    except ValueError:\n        print(\"Sorry, wrong input!\")\n        continue\n    if selection == 2:\n        break\n    if selection == 1:\n        break\n    if selection != 1 and selection != 2:\n        print(\"Sorry, wrong input!\")\n        continue\n\ndef remove_space(string):\n    return \"\".join(string.split())\n\nif selection == 1:\n    # named user_input so the built-in input() is not shadowed\n    user_input = input(\"Enter the sentence (for space enter $): \")\n    input_list = list(user_input.upper())  # the morse table keys are uppercase\n    for i in input_list:\n        if i in morse:\n            translate.append(morse.get(i))\n        else:\n            print('Error: Character not found')\n            sys.exit()\n    result = ' '.join(map(str, translate))\n\n    print(result)\n\nif selection == 2:\n    user_input = input(\"Enter the morse code with space between letters and end the input with double space: \")\n    input_list = list(user_input)\n    for i in input_list:\n        if i == ' ':\n            input_list[input_list.index(i)] = '/'\n    if input_list[-2] == '/':\n        for m in input_list:\n            if cond == True:\n                if m != '/':\n                    text.append(m)\n                elif m == '/':\n                    if text == []:\n                        temp = ' '.join(map(str, translate))\n                        t.append(remove_space(temp))\n                        translate = []\n                    elif text != []:\n                        temp_str = ' '.join(map(str, text))\n                        temp_str_nospace = remove_space(temp_str)\n                        for key, value in morse.items():\n                            cond = False\n                            if value == temp_str_nospace:\n                                cond = True\n                                translate.append(key)\n                                text = []\n                                break\n            elif cond == False:\n                print('Error: Character not found!')\n                sys.exit()\n    elif input_list[-2] != '/':\n        print('Error: the input needs to end with double space!')\n        sys.exit()\n\n    result = ' '.join(map(str, t))\n\n    print(\"Result: \" + result)\n\n\n","repo_name":"areza244/Morse-code","sub_path":"Morse.py","file_name":"Morse.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"30010303831","text":"import pytest\nimport os\nimport uuid\nimport subprocess\nimport shlex\nimport shutil\nfrom xcalar.compute.util.config import detect_config\n\nXLRDIR = os.environ[\"XLRDIR\"]\nXCE_CONFIG = detect_config().config_file_path\nXcalarRootPath = detect_config().xcalar_root_path\ntry:\n    XCE_LOGDIR = detect_config().all_options['Constants.XcalarLogCompletePath']\nexcept KeyError as e:\n    XCE_LOGDIR = '/var/log/xcalar'\nos.chdir(XLRDIR)\nxcalarVersion = subprocess.check_output([\"git\",\n 
\"describe\"]).strip().decode('utf-8')\nUUID = uuid.uuid1()\n\n\ndef test_asup_generation():\n # As we run the tests on a single slave(3 nodes) hardcoding nodeId to 0, if not asups are generated thrice on the same machine.\n cmd = 'python3.6 {}/scripts/Support.py {} 0 {} {} {} {} \"false\" 0 0'.format(\n XLRDIR, UUID, XCE_CONFIG, XcalarRootPath, XCE_LOGDIR, xcalarVersion)\n args = shlex.split(cmd)\n output = subprocess.run(\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n supportBackupDir = os.path.join(XcalarRootPath, 'support')\n supportBackupSupportId = os.path.join(supportBackupDir, str(UUID))\n if os.path.exists(supportBackupSupportId):\n shutil.rmtree(supportBackupSupportId)\n assert \"Successfully generated support bundle\" in output.stdout.decode(\n 'utf-8') and output.returncode == 0\n","repo_name":"varlogtim/xcalar","sub_path":"src/bin/tests/pyTestNew/test_asup_generation.py","file_name":"test_asup_generation.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2748531113","text":"import numpy as np\n\n# fastai\nimport fastai\nfrom fastai.text import BaseTokenizer, Tokenizer, nn, List, Vocab, Collection\nfrom fastai.text import TokenizeProcessor, NumericalizeProcessor\nfrom fastai.text import TextList, Learner, DatasetType\n\n# transformers\nimport transformers\nfrom transformers import PreTrainedModel, PreTrainedTokenizer, PretrainedConfig\nfrom transformers import AutoTokenizer, AutoModel, AutoConfig, AutoModelForSequenceClassification\nfrom transformers import AdamW\nfrom functools import partial\n\ndef get_preds_and_labels(learner, dfin, TEXT_COLUMN, Y_COLUMN, transformer_processor, learner_mapping):\n \n learner.data.add_test(TextList.from_df(dfin, cols=TEXT_COLUMN, processor=transformer_processor))\n test_preds, y = learner.get_preds(DatasetType.Test)\n test_preds = test_preds.detach().cpu().numpy()\n\n #y = y.detach().cpu().numpy()\n y = dfin[Y_COLUMN].tolist()\n best_preds = np.argmax(test_preds, axis=1)\n all_preds_ranked = np.argsort(-test_preds, axis=1)\n #save_pickle(all_preds_ranked, \"mapping_10Mall_preds_ranked.pkl\")\n\n learner_mapping = {v:k for k,v in learner_mapping.items()}\n y = np.array([ learner_mapping[v] for v in y])\n\n return best_preds, all_preds_ranked, y\n\n\n\ndef compute_accuracy(real_labels, best_preds, all_preds):\n acc_1 = np.mean([1 if r in a[:1] else 0 for (r,a) in zip(real_labels, all_preds)]) if all_preds is not None else -1\n acc_3 = np.mean([1 if r in a[:3] else 0 for (r,a) in zip(real_labels, all_preds)]) if all_preds is not None else -1\n acc_5 = np.mean([1 if r in a[:5] else 0 for (r,a) in zip(real_labels, all_preds)]) if all_preds is not None else -1\n acc_10 = np.mean([1 if r in a[:10] else 0 for (r,a) in zip(real_labels, all_preds)]) if all_preds is not None else -1\n acc_at_total = np.mean([1 if r in a else 0 for (r,a) in zip(real_labels, all_preds)]) if all_preds is not None else -1\n return np.round(acc_1,6), np.round(acc_3,6), np.round(acc_5,6), np.round(acc_10,6), np.round(acc_at_total,6)\n\nclass TransformersBaseTokenizer(BaseTokenizer):\n \"\"\"Wrapper around PreTrainedTokenizer to be compatible with fast.ai\"\"\"\n def __init__(self, pretrained_tokenizer: PreTrainedTokenizer, model_type = 'bert', **kwargs):\n self._pretrained_tokenizer = pretrained_tokenizer\n self.max_seq_len = pretrained_tokenizer.max_len\n self.model_type = model_type\n\n def __call__(self, *args, **kwargs): \n return self\n\n def 
tokenizer(self, t:str) -> List[str]:\n        \"\"\"Limits the maximum sequence length and adds the special tokens\"\"\"\n        CLS = self._pretrained_tokenizer.cls_token\n        SEP = self._pretrained_tokenizer.sep_token\n        if self.model_type in ['roberta']:\n            tokens = self._pretrained_tokenizer.tokenize(t, add_prefix_space=True)[:self.max_seq_len - 2]\n            tokens = [CLS] + tokens + [SEP]\n        #if self.model_type in ['bert']:\n        #    return [\"[CLS]\"] + self._pretrained_tokenizer.tokenize(t)[:self.max_seq_len - 2] + [\"[SEP]\"]\n        else:\n            tokens = self._pretrained_tokenizer.tokenize(t)[:self.max_seq_len - 2]\n            if self.model_type in ['xlnet']:\n                tokens = tokens + [SEP] + [CLS]\n            else:\n                tokens = [CLS] + tokens + [SEP]\n        return tokens\n\nclass TransformersVocab(Vocab):\n    def __init__(self, tokenizer: PreTrainedTokenizer):\n        super(TransformersVocab, self).__init__(itos = [])\n        self.tokenizer = tokenizer\n    \n    def numericalize(self, t:Collection[str]) -> List[int]:\n        \"Convert a list of tokens `t` to their ids.\"\n        return self.tokenizer.convert_tokens_to_ids(t)\n        #return self.tokenizer.encode(t)\n\n    def textify(self, nums:Collection[int], sep=' ') -> List[str]:\n        \"Convert a list of `nums` to their tokens.\"\n        nums = np.array(nums).tolist()\n        return sep.join(self.tokenizer.convert_ids_to_tokens(nums)) if sep is not None else self.tokenizer.convert_ids_to_tokens(nums)\n    \n    def __getstate__(self):\n        return {'itos':self.itos, 'tokenizer':self.tokenizer}\n\n    def __setstate__(self, state:dict):\n        import collections  # imported here because the module header does not import it\n        self.itos = state['itos']\n        self.tokenizer = state['tokenizer']\n        self.stoi = collections.defaultdict(int,{v:k for k,v in enumerate(self.itos)})\n\nclass CustomTransformerModel(nn.Module):\n    def __init__(self, transformer_model: PreTrainedModel, pad_idx):\n        super(CustomTransformerModel,self).__init__()\n        self.transformer = transformer_model\n        self.pad_idx = pad_idx\n    \n    def forward(self, input_ids, attention_mask=None):\n        \n        # attention_mask\n        # Mask to avoid performing attention on padding token indices.\n        # Mask values selected in ``[0, 1]``:\n        # ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n        attention_mask = (input_ids!=self.pad_idx).type(input_ids.type()) \n        \n        logits = self.transformer(input_ids,\n                                  attention_mask = attention_mask)[0] \n        return logits","repo_name":"KevinRoitero/dilbert","sub_path":"finetuning_utils.py","file_name":"finetuning_utils.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} 
+{"seq_id":"32175603366","text":"from rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n@api_view(['GET']) # choose which request methods to handle\ndef helloAPI(request):\n    return Response(\"HELLO REST API\")\n\nfrom .serializers import BookSerializer\nfrom rest_framework import status\nfrom .models import Book\n@api_view(['POST', 'GET', 'PUT','DELETE']) # can handle both POST and GET, but the two cases should be kept separate.\ndef booksAPI(request):\n    # check request.method to see how the request was sent\n    if request.method=='GET':\n        # fetch all the records\n        books=Book.objects.all()\n        serializer=BookSerializer(books,many=True)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n    elif request.method=='POST': # for POST requests\n        # convert the data sent by the client\n        # into data the Model can use\n        # print(\"1\") # if this never runs, the URL and method are wired up wrong\n        serializer = BookSerializer(data=request.data)\n        # print(\"2\") # if this fails, serialization failed\n        # validation\n        if serializer.is_valid():\n            # print(\"3\") # if this never runs, a field name is wrong\n            serializer.save() # save the data\n            # on success, send the submitted data back\n            return Response(serializer.data,\n                            status=status.HTTP_201_CREATED)\n        # handle failure\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n# look up the record by primary key, and raise a 404 error if it does not exist\nfrom rest_framework.generics import get_object_or_404\n\n@api_view(['GET','PUT'])\ndef bookAPI(request, bid): # bid is part of the URL\n    # fetches a single record by its primary key\n    # books=get_object_or_404(Book, bid=bid) # could just use this instead of try/except\n    try:\n        books = Book.objects.get(bid=bid)\n    except Book.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n    if request.method=='GET':\n        serializer=BookSerializer(books)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n    elif request.method=='PUT':\n        serializer = BookSerializer(books, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n    return Response(status=status.HTTP_404_NOT_FOUND)\n\n\nfrom django.shortcuts import render\ndef ajax(request):\n    return render(request, \"ajax.html\")","repo_name":"mino1998/web_study","sub_path":"web_rest_study/apiserverapplication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"28894591535","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMissing Worker Detection - ERL Emergency 2020\nCreated on Fri Mar 6 12:01:13 2020\n\n@author: Marta Kwiatkowska\n\"\"\"\n\nimport numpy as np\nimport cv2 as cv\nfrom imutils.object_detection import non_max_suppression\n\ndef DetectMissingWorker(img): \n    DetectFullBody(img)\n    is_face_detected = DetectFace(img)\n    mask = DetectOrangeColor(img)\n    DrawCountorus(mask, img)\n    \n    # countNonZero gives a pixel count, so combine it with the boolean\n    # using a logical 'and' (a bitwise '&' gives wrong results for even counts)\n    if cv.countNonZero(mask) and is_face_detected:\n        print (\"Detected missing worker.\")\n    else:\n        print (\"No workers detected.\")\n\ndef DetectOrangeColor(img):\n    hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n    lower_orange = np.array([0, 200, 200])\n    upper_orange = np.array([25, 255, 255])\n    # lower_orange = np.array([10, 100, 20])\n    # upper_orange = np.array([25, 255, 255])\n    mask = cv.inRange(hsv, lower_orange, upper_orange)\n    return mask\n\ndef DrawCountorus(mask, img):\n    # [-2:] works with both OpenCV 3 (three return values) and OpenCV 4 (two)\n    cnts, hie = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)[-2:]\n    cv.drawContours(img, cnts, -1, (0,255,255), thickness=1) \n    #x,y,w,h = cv.boundingRect(cnts[len(cnts)-1])\n    #cv.putText(img, 'Missing worker', (x+100, y-10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)\n    \ndef DetectFace(img):\n    detector = cv.CascadeClassifier(\"C:\\\\Users\\\\marta\\\\PycharmProjects\\\\OpenCV.Algorithms\\\\venv\\\\Lib\\\\site-packages\\\\cv2\\\\data\\\\haarcascade_frontalface_default.xml\")\n    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n    rects = detector.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)\n    \n    for (i, (x, y, w, h)) in enumerate(rects):\n    \tcv.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), thickness=1)\n    \tcv.putText(img, \"Face #{}\".format(i + 1), (x, y - 10),\n    \t\tcv.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 0), 1)\n    \n    # detectMultiScale returns a (possibly empty) sequence, never None,\n    # so test the length instead of comparing against None\n    return len(rects) > 0\n    \ndef DetectFullBody(img):\n    hog = cv.HOGDescriptor()\n    hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())\n    \n    (rect, weight) = hog.detectMultiScale(img, winStride=(4, 4), \tpadding=(8, 8), scale=1.05)\n    \n    rect = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rect])\n    pick = non_max_suppression(rect, probs=None, overlapThresh=0.65)\n    for (xA, yA, xB, yB) in pick:\n    \tcv.rectangle(img, (xA, 
yA), (xB, yB), (0, 255, 0), 1)\n \nif __name__ == \"__main__\":\n \n# code for the image\n \n source_path = \"D:\\\\repos\\\\UAV.OpenCV.Algorithms\\\\UAV.OpenCV.Algorithms.Missing.Worker\\\\Images\\\\Worker.png\"\n img = cv.imread(source_path) \n\n DetectMissingWorker(img) \n \n cv.imshow('image',img)\n cv.waitKey(0)\n cv.destroyAllWindows() \n \n","repo_name":"mkwiatkowska003/Drone-programming","sub_path":"UAV.OpenCV.Algorithms.Missing.Worker/MissingWorker.py","file_name":"MissingWorker.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1320681974","text":"from __future__ import annotations\n\nimport os\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom base import NeuralBase\n\nINPUT_PATH_TO_FILE = os.path.join(\n os.getcwd(), \"artificial_intelligence\", \"hw4\", \"starter\", \"data1.csv\"\n)\n\nOUTPUT_PATH_TO_FILE = os.path.join(\n os.getcwd(), \"artificial_intelligence\", \"hw4\", \"starter\", \"results1.csv\"\n)\n\n\n@dataclass\nclass Perceptron(NeuralBase):\n \"\"\"Implements the Perceptron Learning Algorithm\"\"\"\n\n def __init__(self, eta: float = 0.01, niter: int = 50, bias: bool = True):\n self.eta = eta\n self.niter = niter\n self.bias = bias\n self.errors = None\n self.thetas = None\n self.degree = 1\n\n def fit(self, X: np.ndarray, y: np.ndarray) -> Perceptron:\n \"\"\"fits training data\n\n Parameters\n ----------\n X : np.ndarray, shape=(n_samples, p_features)\n n_samples is number of instances i.e rows\n p_features is number of features (dimension of data)\n y : np.ndarray\n response variable\n\n Returns\n -------\n Perception\n object with fitted parameters\n \"\"\"\n # Add bias unit to design matrix\n degree, bias = self.degree, self.bias\n X = self.make_polynomial(X, degree, bias)\n\n # Generate small random weights\n self.thetas = np.random.rand(X.shape[1])\n self.errors = np.zeros(self.niter)\n weights = {}\n\n for index in range(self.niter):\n # Count total misclassifications in each iteration\n count = 0\n\n # Iterate through each example and identify misclassifications\n # Number of errors must decline after each iteration\n for xi, target in zip(X, y):\n # make prediction\n yhat = self.predict(xi)\n\n # update weights if there are misclassifications\n if target * yhat <= 0:\n self.thetas += self.eta * (target - yhat) * xi\n count += 1\n # updated weight per iteration\n weights[index] = self.thetas.copy()\n # store count of errors in each iteration\n self.errors[index] = count\n\n self.weights = pd.DataFrame.from_dict(\n weights, orient=\"index\", columns=[\"bias\", \"coef1\", \"coef2\"]\n )\n\n return self\n\n def predict(\n self, X: np.ndarray, thetas: Optional[np.ndarray] = None\n ) -> np.ndarray:\n \"\"\"Activation function to determine if neuron should fire or not\n\n Parameters\n ----------\n X : np.ndarray\n design matrix that includes the bias\n thetas : Union[np.ndarray, None], optional\n weights from fitting, by default None\n\n Returns\n -------\n np.ndarray\n predictions\n \"\"\"\n if thetas is None and self.thetas is None:\n raise ValueError(\n \"Empty weights provided, either call fit() first or provide \\\n weights\"\n )\n elif thetas is None:\n return 1 if self.net_input(X, self.thetas) >= 0 else -1\n return 1 if self.net_input(X, thetas) >= 0 else -1\n\n def plot_misclassifications(self) -> None:\n \"\"\"Plots the misclassifications given number of 
iterations\n Requires call to fit() first, otherwise raise appropriate error\n\n Raises\n ------\n AttributeError\n if fit() has not been called\n \"\"\"\n if self.errors is None:\n raise AttributeError(\n \"Must call fit() first before plotting \\\n misclassifications\"\n )\n # plot the errors\n plt.plot(range(1, self.niter + 1), self.errors, marker=\"o\")\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"# of misclassifications\")\n plt.grid()\n plt.show()\n\n\ndef plot_data(inputs, targets, weights):\n plt.figure(figsize=(10, 6))\n plt.grid(True)\n\n for input, target in zip(inputs, targets):\n plt.plot(input[0], input[1], \"ro\" if (target == 1.0) else \"bo\")\n\n for i in np.linspace(np.amin(inputs[:, :1]), np.amax(inputs[:, :1])):\n slope = -weights[1] / weights[2]\n intercept = -weights[0] / weights[2]\n y = (slope * i) + intercept\n plt.plot(i, y, \"ko\")\n plt.show()\n\n\ndef main():\n \"\"\"YOUR CODE GOES HERE\"\"\"\n pla = Perceptron(eta=0.1)\n data = np.genfromtxt(INPUT_PATH_TO_FILE, delimiter=\",\")\n inputs, targets = data[:, :2], data[:, 2]\n pla.fit(inputs, targets)\n weight = pla.thetas\n plot_data(inputs, targets, weight)\n\n pla.weights.to_csv(OUTPUT_PATH_TO_FILE)\n\n\nif __name__ == \"__main__\":\n \"\"\"DO NOT MODIFY\"\"\"\n main()\n","repo_name":"mrajancsr/Market-Learn","sub_path":"artificial_intelligence/hw4/starter/pla.py","file_name":"pla.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"752059016","text":"from django.db.models import Q\nfrom userprofile.models import Profile\nfrom rest_framework.response import Response\nfrom django.shortcuts import render, get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.serializers import ValidationError\nfrom django.conf import settings\nfrom rest_framework.filters import (\n SearchFilter,\n OrderingFilter,\n)\nfrom rest_framework.decorators import api_view\nfrom rest_framework.generics import (\n CreateAPIView,\n DestroyAPIView,\n ListAPIView,\n UpdateAPIView,\n RetrieveAPIView,\n RetrieveUpdateAPIView\n)\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom datetime import datetime, timezone, date\nfrom django.contrib.auth.models import User\nfrom rest_framework.permissions import (\n AllowAny,\n IsAuthenticated,\n IsAdminUser,\n IsAuthenticatedOrReadOnly,\n\n)\nfrom django_filters import rest_framework as filters\nfrom .models import Pagamento, AulaAvulsaGrupo, AulaExperimental, AulaPersonal, ResumoMensal, VendaItems, Item, DespesasFixa, Teste, ResumoManualMes, Despesas\n\n\nfrom .permissions import IsOwnerOrReadOnly\n\nfrom .serializers import (\n PagamentoCreateUpdateSerializer,\n PagamentoDetailSerializer,\n PagamentoListSerializer,\n PagamentoListAllSerializer,\n ResumoMensalListAllSerializer,\n AulaAvulsaGrupoCreateUpdateSerializer,\n AulaExperimentalCreateUpdateSerializer,\n AulaPersonalCreateUpdateSerializer,\n VendaItemsCreateUpdateSerializer,\n ItemCreateUpdateSerializer,\n ResumoManualMesListAllSerializer,\n TesteSerializer,\n)\nimport mercadopago\n\nimport django_filters\nfrom django.db.models import Q\n\n\nclass PagamentoFilter(filters.FilterSet):\n multi_name_fields = django_filters.CharFilter(\n method='filter_by_all_name_fields')\n\n class Meta:\n model = Pagamento\n fields = []\n\n def filter_by_all_name_fields(self, queryset, name, value):\n return queryset.filter(\n Q(city__icontains=value) | Q(address__icontains=value) | Q(\n state__icontains=value)\n )\n\n\nclass 
PagamentoCreateAPIView(CreateAPIView):\n queryset = Pagamento.objects.all()\n serializer_class = PagamentoCreateUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass ItemCreateAPIView(CreateAPIView):\n queryset = Item.objects.all()\n serializer_class = ItemCreateUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def perform_create(self, serializer):\n serializer.save()\n\n\nclass AulaAvulsaGrupoCreateAPIView(CreateAPIView):\n queryset = AulaAvulsaGrupo.objects.all()\n serializer_class = AulaAvulsaGrupoCreateUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def perform_create(self, serializer):\n if self.request.data['user']:\n id = self.request.data['user']\n print(f'id = {id}')\n u = User.objects.get(id=id)\n if u is None:\n u = User.objects.first()\n serializer.save(user=u)\n\n\nclass VendaItemsCreateAPIView(CreateAPIView):\n queryset = VendaItems.objects.all()\n serializer_class = VendaItemsCreateUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def perform_create(self, serializer):\n if self.request.data['user']:\n id = self.request.data['user']\n print(f'id = {id}')\n u = User.objects.get(id=id)\n if u is None:\n u = User.objects.first()\n\n venda = serializer.save(user=u)\n print(f'venda.quant = {venda.quant}')\n i = venda.item\n print(f'i.estoque = {i.estoque}')\n i.estoque = i.estoque - venda.quant\n i.save()\n print(f'i.estoque dps de salvo = {i.estoque}')\n\n\nclass AulaExperimentalCreateAPIView(CreateAPIView):\n queryset = AulaExperimental.objects.all()\n serializer_class = AulaExperimentalCreateUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def perform_create(self, serializer):\n if self.request.data['user']:\n id = self.request.data['user']\n print(f'id = {id}')\n u = User.objects.get(id=id)\n if u is None:\n u = User.objects.first()\n serializer.save(user=u)\n\n\nclass AulaPersonalCreateAPIView(CreateAPIView):\n queryset = AulaPersonal.objects.all()\n serializer_class = AulaPersonalCreateUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def perform_create(self, serializer):\n if self.request.data['user']:\n id = self.request.data['user']\n print(f'id = {id}')\n u = User.objects.get(id=id)\n if u is None:\n u = User.objects.first()\n serializer.save(user=u)\n\n\nclass PagamentoDetailAPIView(RetrieveAPIView):\n queryset = Pagamento.objects.all()\n serializer_class = PagamentoDetailSerializer\n lookup_field = 'id'\n permission_classes = [AllowAny]\n # lookup_url_kwarg = \"abc\"\n\n\nclass PagamentoUpdateAPIView(UpdateAPIView):\n queryset = Pagamento.objects.all()\n serializer_class = PagamentoCreateUpdateSerializer\n lookup_field = 'id'\n permission_classes = [IsAuthenticated]\n\n def perform_update(self, serializer):\n serializer.save()\n\n # def perform_update(self, serializer):\n # pagamento_obj = self.get_object()\n # print(f'pagamento_obj = {pagamento_obj}')\n\n # instance = serializer.save()\n # pag_user = instance.user\n # todos_pag = Pagamento.objects.filter(\n # user=pag_user, data__gt=instance.data)[:12]\n # print(f'todos_pag ={todos_pag}')\n # plano_pag = instance.user.profile.plano_pagamento\n\n # if(plano_pag == \"Trimestral\"):\n # for pp in todos_pag[:2]:\n # pp.pago = True\n # pp.save()\n\n # if(plano_pag == \"Semestral\"):\n # for pp in todos_pag[:5]:\n # pp.pago = True\n # pp.save()\n # if(plano_pag == \"Anual\"):\n # for pp in todos_pag[:11]:\n # pp.pago = True\n # pp.save()\n\n # return 
instance\n\n\nclass PagamentoDeleteAPIView(DestroyAPIView):\n queryset = Pagamento.objects.all()\n serializer_class = PagamentoDetailSerializer\n lookup_field = 'id'\n permission_classes = [IsAdminUser]\n\n\nclass PagamentoListAPIView(ListAPIView):\n serializer_class = PagamentoListSerializer\n permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n # this makes post method on listapiview\n return self.list(request, *args, **kwargs)\n\n def list(self, request):\n if request.data['user_id'] is not None:\n u = User.objects.get(id=request.data['user_id'])\n else:\n u = User.objects.first()\n\n qs = Pagamento.objects.all()\n queryset_list = Pagamento.objects.filter(\n user=u)\n\n return Response({\"financeiros\": PagamentoListSerializer(queryset_list, many=True).data})\n\n\nclass PagamentoListAllAPIView(ListAPIView):\n serializer_class = PagamentoListAllSerializer\n permission_classes = [AllowAny]\n\n def get_queryset(self, *args, **kwargs):\n now = datetime.now(timezone.utc)\n year = now.year\n month = now.month\n\n queryset_list = Pagamento.objects.filter(data__year__gte=year, data__year__lte=year+1,\n user__is_active=True).exclude(user__profile__is_professor=True) # filter(user=self.request.user)\n\n return queryset_list\n\n\nclass PagamentoPorAulunoAPIView(ListAPIView):\n serializer_class = PagamentoListAllSerializer\n permission_classes = [IsAuthenticated]\n pagination_class = LimitOffsetPagination\n page_size = 200\n\n def get_queryset(self, *args, **kwargs):\n user = self.request.user\n\n now = datetime.now(timezone.utc)\n year = now.year\n month = now.month\n\n queryset_list = Pagamento.objects.filter(\n user__is_active=True, user=user, data__year__gte=year, data__year__lte=year+1) # filter(user=self.request.user)\n\n return queryset_list\n\n\nclass ItemsListAllAPIView(ListAPIView):\n serializer_class = ItemCreateUpdateSerializer\n permission_classes = [AllowAny]\n\n def get_queryset(self, *args, **kwargs):\n\n queryset_list = Item.objects.all() # filter(user=self.request.user)\n\n return queryset_list\n\n\nclass TesteListAllAPIView(ListAPIView):\n serializer_class = TesteSerializer\n permission_classes = [AllowAny]\n\n def get_queryset(self, *args, **kwargs):\n\n queryset_list = Teste.objects.all() # filter(user=self.request.user)\n\n return queryset_list\n\n\nclass ResumoManualMesListAllAPIView(ListAPIView):\n serializer_class = ResumoManualMesListAllSerializer\n permission_classes = [AllowAny]\n\n def get_queryset(self, *args, **kwargs):\n\n queryset_list = ResumoManualMes.objects.all() # filter(user=self.request.user)\n\n return queryset_list\n\n\nclass ResumoMensalListAllAPIView(ListAPIView):\n serializer_class = ResumoMensalListAllSerializer\n permission_classes = [AllowAny]\n\n def get_queryset(self, *args, **kwargs):\n\n queryset_list = ResumoMensal.objects.all() # filter(user=self.request.user)\n\n return queryset_list\n\n\n@api_view(['POST'])\ndef mercadopago_pix(request):\n print(f'antes do sdk')\n sdk = mercadopago.SDK(\n \"TEST-4458147267707345-101313-a90d00320a2823cb5a9161348e5ebcf6-98517282\")\n print(f'sdk')\n # alunoId = request.data['alunoId']\n payment_data = {\n \"transaction_amount\": 100,\n \"description\": \"Título do produto\",\n \"payment_method_id\": \"pix\",\n \"payer\": {\n \"email\": \"test@test.com\",\n \"first_name\": \"Test\",\n \"last_name\": \"User\",\n \"identification\": {\n \"type\": \"CPF\",\n \"number\": \"191191191-00\"\n },\n \"address\": {\n \"zip_code\": \"06233-200\",\n \"street_name\": \"Av. 
das Nações Unidas\",\n \"street_number\": \"3003\",\n \"neighborhood\": \"Bonfim\",\n \"city\": \"Osasco\",\n \"federal_unit\": \"SP\"\n }\n }\n }\n print(f'payment_daya = {payment_data}')\n payment_response = sdk.payment().create(payment_data)\n payment = payment_response[\"response\"]\n print(f'payment = {payment}')\n return Response(payment)\n\n\n@api_view(['POST'])\ndef mensal_por_professor(request):\n total_prof = 0\n total_studio = 0\n now = datetime.now(timezone.utc)\n\n print('dentro do mensao_por_professor')\n professorId = request.data['professorId']\n print(f'professorId {professorId}')\n data = request.data['data']\n # data = datetime.strptime(data_string, '%m/%d/%y')\n print(f'data {data}')\n print(f' type data {type(data)}')\n dt_obj = datetime.strptime(\n data, '%d-%m-%Y')\n print(f'dt_obj = {dt_obj}')\n print(f'dt_obj.year = {dt_obj.year}')\n print(f'dt_obj.month = {dt_obj.month}')\n year = dt_obj.year\n month = dt_obj.month\n year = dt_obj.year\n month = dt_obj.month\n professor = Profile.objects.get(id=professorId)\n print(f'professor = {professor}')\n alunos_do_professor = Profile.objects.filter(\n professor=professor.user, user__is_active=True)\n print(f'alunos_do_professor = {alunos_do_professor}')\n\n listResposta = []\n for aluno in alunos_do_professor:\n obj = {\"dia_pagamento\": 0, \"first_name\": \"Sem Nome\",\n \"id_pagamento\": 0, \"valor\": 0, \"valor_professor\": 0, \"valor_studio\": 0}\n print('------------------------------------------')\n\n if(aluno.dia_pagamento):\n obj['dia_pagamento'] = aluno.dia_pagamento\n\n else:\n obj['dia_pagamento'] = 0\n\n obj['first_name'] = aluno.user.first_name\n print(f'aluno.user.first_name === {aluno.user.first_name}')\n\n pagamento_do_aluno = aluno.user.pagamento_set.filter(\n data__year=year, data__month=month).first()\n\n print(f'pagament_do_aluno {pagamento_do_aluno}')\n if(pagamento_do_aluno):\n\n obj['valor'] = pagamento_do_aluno.valor\n obj['valor_professor'] = pagamento_do_aluno.valor * 0.4\n obj['valor_studio'] = pagamento_do_aluno.valor * 0.6\n obj['id_pagamento'] = pagamento_do_aluno.id\n total_prof += pagamento_do_aluno.valor * 0.4\n total_studio += pagamento_do_aluno.valor * 0.6\n\n listResposta.append(obj)\n # final = sorted(listResposta, key=lambda x: x[0])\n return Response({\n \"data\": listResposta,\n \"total_prof\": total_prof,\n \"total_studio\": total_studio\n })\n\n\n@api_view(['POST'])\ndef pagamentos_pendentes(request):\n alunoId = request.data['alunoId']\n print(f'alunoId from api/pendentes {alunoId}')\n user = User.objects.get(id=alunoId)\n now = datetime.now(timezone.utc)\n dt = date.today()\n month = now.month\n year = now.year\n pagamentos_em_aberto = Pagamento.objects.filter(\n user=user, pago=False, data__lt=now)\n print(f'pagamentos_em_aberto = {pagamentos_em_aberto}')\n quantos_em_aberto = pagamentos_em_aberto.count()\n print(f'quantos_em_aberto = {quantos_em_aberto}')\n proximo_boleto = Pagamento.objects.filter(\n user=user, pago=False, data__gt=now).first()\n print(f'proximo_boleto = {proximo_boleto}')\n diferenca = proximo_boleto.data - dt\n diferenca_dias = diferenca.days\n print(f'diferenca = {diferenca}')\n print(f'diferenca.days = {diferenca.days}')\n\n return Response({\n \"quantos_em_aberto\": quantos_em_aberto,\n \"diferenca_prox_boleto\": diferenca_dias\n })\n\n\n@api_view(['GET'])\ndef get_resumo_mes(request):\n now = datetime.now(timezone.utc)\n year = now.year\n month = now.month\n\n valor_experimental = 50\n valor_avulsa_grupo = 60\n valor_personal = 120\n valor_matricula = 
80\n valor_rematricula = 50\n total_itens = 0\n total_despesas = 0\n\n for despesa in DespesasFixa.objects.all():\n total_despesas += despesa.valor\n\n for venda in VendaItems.objects.filter(data__year__gte=year, data__month__gte=month, data__year__lte=year, data__month__lte=month):\n total_itens += venda.item.valor * venda.quant\n\n total_experimental = AulaExperimental.objects.filter(data__year__gte=year,\n data__month__gte=month,\n data__year__lte=year,\n data__month__lte=month).count() * valor_experimental\n\n total_avulsa = AulaAvulsaGrupo.objects.filter(data__year__gte=year,\n data__month__gte=month,\n data__year__lte=year,\n data__month__lte=month).count() * valor_avulsa_grupo\n\n total_personal = AulaPersonal.objects.filter(data__year__gte=year,\n data__month__gte=month,\n data__year__lte=year,\n data__month__lte=month).count() * valor_personal\n\n total_matricula = Profile.objects.filter(created_at__year__gte=year,\n created_at__month__gte=month,\n created_at__year__lte=year,\n created_at__month__lte=month).count() * valor_matricula\n\n total_rematricula = Profile.objects.filter(data_rematricula__year__gte=year,\n data_rematricula__month__gte=month,\n data_rematricula__year__lte=year,\n data_rematricula__month__lte=month).count() * valor_rematricula\n\n total_pagamento = 0\n\n for pagamento in Pagamento.objects.filter(data__year__gte=year, data__month__gte=month, data__year__lte=year, data__month__lte=month).filter(pago=True):\n total_pagamento += pagamento.valor\n\n total_mes = total_experimental + total_avulsa + total_matricula + \\\n total_pagamento + total_personal + total_rematricula + total_itens - total_despesas\n\n return Response({\n \"total_experimental\": total_experimental,\n \"total_avulsa\": total_avulsa,\n \"total_itens\": total_itens,\n \"total_personal\": total_personal,\n \"total_matricula\": total_matricula,\n \"total_rematricula\": total_rematricula,\n \"total_pagamento\": total_pagamento,\n \"total_despesas\": total_despesas,\n \"total_mes\": total_mes\n })\n\n\ndef resumo_mensal():\n now = datetime.now(timezone.utc)\n year = now.year\n month = now.month\n\n valor_experimental = 50\n valor_avulsa_grupo = 60\n valor_personal = 120\n valor_matricula = 80\n valor_rematricula = 50\n total_itens = 0\n total_despesas = 0\n\n for despesa in DespesasFixa.objects.all():\n total_despesas += despesa.valor\n for venda in VendaItems.objects.filter(data__year__gte=year, data__month__gte=month, data__year__lte=year, data__month__lte=month):\n total_itens += venda.item.valor * venda.quant\n\n total_experimental = AulaExperimental.objects.filter(data__year__gte=year,\n data__month__gte=month,\n data__year__lte=year,\n data__month__lte=month).count() * valor_experimental\n\n total_avulsa = AulaAvulsaGrupo.objects.filter(data__year__gte=year,\n data__month__gte=month,\n data__year__lte=year,\n data__month__lte=month).count() * valor_avulsa_grupo\n\n total_personal = AulaPersonal.objects.filter(data__year__gte=year,\n data__month__gte=month,\n data__year__lte=year,\n data__month__lte=month).count() * valor_personal\n\n total_matricula = Profile.objects.filter(created_at__year__gte=year,\n created_at__month__gte=month,\n created_at__year__lte=year,\n created_at__month__lte=month).count() * valor_matricula\n\n total_rematricula = Profile.objects.filter(created_at__year__gte=year,\n created_at__month__gte=month,\n created_at__year__lte=year,\n created_at__month__lte=month).count() * valor_rematricula\n\n total_pagamento = 0\n\n for pagamento in 
Pagamento.objects.filter(data__year__gte=year, data__month__gte=month, data__year__lte=year, data__month__lte=month).filter(pago=True):\n total_pagamento += pagamento.valor\n\n total_mes = total_experimental + total_avulsa + total_matricula + \\\n total_pagamento + total_personal + total_rematricula + total_itens - total_despesas\n\n ResumoMensal.objects.create(total_experimental=total_experimental, total_avulsa=total_avulsa, total_personal=total_personal,\n total_matricula=total_matricula, total_rematricula=total_rematricula, total_pagamento=total_pagamento, total_mes=total_mes, total_itens=total_itens, total_despesas=total_despesas)\n\n\n@api_view(['GET'])\ndef get_pagamento_professor(request):\n now = datetime.now(timezone.utc)\n year = now.year\n month = now.month\n\n list_prof = []\n list_prof_rend = []\n for prof in Profile.objects.filter(is_professor=True):\n u = prof.user\n list_prof.append(u.first_name)\n valor_acumulado = 0\n for pr in u.professor.all():\n valor = 0\n plano = pr.user.profile.plano\n plano_pagamento = pr.user.profile.plano_pagamento\n if (plano == \"4 Aulas\"):\n if(plano_pagamento == \"Mensal\"):\n valor = 200\n if(plano_pagamento == \"Trimestral\"):\n valor = 190\n if(plano_pagamento == \"Semestral\"):\n valor = 180\n if(plano_pagamento == \"Anual\"):\n valor = 170\n if (plano == \"8 Aulas\"):\n if(plano_pagamento == \"Mensal\"):\n valor = 320\n if(plano_pagamento == \"Trimestral\"):\n valor = 300\n if(plano_pagamento == \"Semestral\"):\n valor = 280\n if(plano_pagamento == \"Anual\"):\n valor = 260\n if (plano == \"12 Aulas\"):\n if(plano_pagamento == \"Mensal\"):\n valor = 440\n if(plano_pagamento == \"Trimestral\"):\n valor = 420\n if(plano_pagamento == \"Semestral\"):\n valor = 400\n if(plano_pagamento == \"Anual\"):\n valor = 380\n if (plano == \"16 Aulas\"):\n if(plano_pagamento == \"Mensal\"):\n valor = 580\n if(plano_pagamento == \"Trimestral\"):\n valor = 560\n if(plano_pagamento == \"Semestral\"):\n valor = 540\n if(plano_pagamento == \"Anual\"):\n valor = 520\n valor_acumulado += valor\n list_prof_rend.append(valor_acumulado)\n print(f'lista_prof {list_prof}')\n print(f'lista_prof_rend {list_prof_rend}')\n resposta = dict(zip(list_prof, list_prof_rend))\n return Response({\n \"professores\": list_prof,\n \"alunos\": list_prof_rend,\n \"resposta\": resposta,\n })\n","repo_name":"vukknesh/natalia-backend","sub_path":"financeiro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32411139764","text":"import random\r\nimport discord\r\nfrom discord import option\r\nimport os\r\n# from boto.s3.connection import S3Connection\r\n\r\nintents = discord.Intents.all()\r\nbot = discord.Bot()\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('logged in as {0.user}'.format(bot))\r\n print('User ID:')\r\n print(bot.user.id)\r\n print(\"Loading Roller Bot\\n\")\r\n\r\n\r\n@bot.command(description='Help menu.')\r\nasync def help(ctx):\r\n embed = discord.Embed(\r\n color = discord.Color.green(),\r\n title = \"__Help Menu__\",\r\n description = \"This is the help menu for the **Roller Bot 2.0**!!\"\r\n )\r\n embed.add_field(name=\"__**Commands:**__\", value=f\"\"\"\r\n **/roll** - To make roll some dice.\r\n **/battle** - Set the battle order of everyone. 
\\n (feel free to add in some extra battlers if you'd like)\r\n    \"\"\")\r\n    await ctx.respond(embed=embed)\r\n\r\n\r\ndef __init__(self, bot):\r\n    print(\"test\")\r\n    self.bot = bot\r\n\r\n\r\n@bot.slash_command(name=\"roll\", description='Roll some dice!')\r\n@option(\r\n    \"num\",\r\n    description='How many dice?'\r\n)\r\n@option(\r\n    \"d\",\r\n    description='How many sided dice?'\r\n)\r\n@option(\r\n    \"mod\",\r\n    description='Are there any modifiers on your roll?',\r\n    required=False\r\n)\r\nasync def roll(ctx, num, d, mod):\r\n    print(ctx.author.display_name + ' ran /roll')\r\n    final = 0\r\n    string=''\r\n    di=int(d)\r\n    for i in range(int(num)):\r\n        r=random.randint(1, di)\r\n        final += r\r\n        n = '['+str(r)+']'\r\n        if i==0:\r\n            string += n\r\n        else:\r\n            string += f' + {n}'\r\n    if mod is not None:\r\n        final += int(mod)\r\n        string += f' + {mod}'\r\n    f = str(final)\r\n    string += f' = `{f}`'\r\n    head = num+'d'+d\r\n    if mod is not None:\r\n        head += '+'+mod\r\n    head += f' = `{f}`'\r\n    embed = discord.Embed(\r\n        color=discord.Color.blue(),\r\n        title=head,\r\n        description=string\r\n    )\r\n    embed.set_footer(icon_url=ctx.author.avatar, text=f\"Command Requested by: {ctx.author.display_name}\")\r\n    return await ctx.respond(embed=embed)\r\n\r\n\r\n@bot.slash_command(name=\"battle\", description='Set the battle order!')\r\n@option(\r\n    \"extras\",\r\n    description='Enter any extra players or opponents (separate with commas)',\r\n    required=False\r\n)\r\nasync def battle(ctx, extras):\r\n    print(ctx.author.display_name + ' ran /battle')\r\n    players = []\r\n    if extras is not None:\r\n        players = extras.split(',')\r\n        for i in range(len(players)):\r\n            players[i] = players[i].strip()\r\n    voice = ctx.author.voice\r\n    if voice is not None:\r\n        channel = voice.channel\r\n        people = channel.members\r\n        for p in people:\r\n            players.append(p.display_name)\r\n        \r\n        random.shuffle(players)\r\n        out = ''\r\n        for i in range(len(players)):\r\n            out += str(i+1) + ') ' + players[i] + '\\n'\r\n        embed = discord.Embed(\r\n            color=discord.Color.green(),\r\n            title='Battle Order:',\r\n            description=out\r\n        )\r\n        embed.set_footer(icon_url=ctx.author.avatar, text=f\"Command Requested by: {ctx.author.display_name}\")\r\n        return await ctx.respond(embed=embed)\r\n    else:\r\n        embed = discord.Embed(\r\n            color=discord.Color.red(),\r\n            title='Error!',\r\n            description='User ***' + ctx.author.display_name + '*** is not currently in a voice channel.'\r\n        )\r\n        embed.set_footer(icon_url=ctx.author.avatar, text=f\"Command Requested by: {ctx.author.display_name}\")\r\n        return await ctx.respond(embed=embed)\r\n\r\n# s3 = S3Connection(os.environ.get('BOT_TOKEN'), os.environ.get('BOT_TOKEN'))\r\n\r\nbot.run(os.environ['BOT_TOKEN'])","repo_name":"Neil-Kasson/Roller2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"23672954350","text":"#step 2 \r\n\r\n\r\n#importing \r\nimport math \r\nmath.sqrt(9)\r\n\r\nimport time \r\ntime.ctime()\r\n\r\n#arrays \r\na = [1,2,3,4]\r\na \r\na.append(8)\r\na\r\n\r\n\r\n#looping through arrays in 2 ways\r\nfor i in range(len(a)):\r\n    print(a[i])  # print the element at index i\r\n    \r\nfor x in a:\r\n    print(x)\r\n\r\n\r\n#dictionary \r\nd = {'cat':'meow', 'dog':'bark',\r\n     'bird':'chirp'}\r\nd['dog'] = 'run'\r\n\r\n#if-else\r\nx = 2\r\nif x == 1:\r\n    print(5)\r\nelse:\r\n    print(1)","repo_name":"Chess777-tech/LearnPython","sub_path":"learn python step 
2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74837455304","text":"import os\nfrom celery import Celery\n\n# Set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoProject1.settings')\n\napp = Celery('djangoProject1')\n\n# Load tasks from all registered Django app configs.\napp.config_from_object('django.conf:settings', namespace='CELERY')\napp.autodiscover_tasks()\n\n\napp.conf.beat_scheduler = 'django_celery_beat.schedulers:DatabaseScheduler'\n","repo_name":"theradP/zenduty_task","sub_path":"djangoProject1/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17353523136","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nimport subprocess\r\nfrom tkinter.filedialog import askopenfilename,asksaveasfilename\r\nimport pyautogui as pag\r\nfrom tkinter import colorchooser\r\nfrom tkfontchooser import askfont\r\nfrom gtts import gTTS\r\nimport os\r\nimport wikipedia as wiki\r\nfrom tkinter import scrolledtext\r\nimport tkinter.messagebox as tmsg\r\nimport cv2\r\nimport pytesseract\r\n\r\n\r\n#editor interface configration\r\nroot = Tk()\r\nroot.title(\"Integrated Editor (IE)\")\r\nroot.state(\"zoomed\")\r\nphoto = PhotoImage(file = \"img1.png\")\r\nroot.iconphoto(False, photo)\r\nroot.resizable(0,0)\r\n\r\n#Python functions\r\nfile_path = ''\r\n#myfont=(\"Times New Roman\", 12, \"bold\")\r\n\r\n\r\ndef set_file_path(path):\r\n global file_path\r\n file_path = path\r\n\r\n\r\ndef open_file():\r\n path = askopenfilename(filetypes=[('Python Files', '*.py')])\r\n with open(path, 'r') as file:\r\n code = file.read()\r\n input_data.delete('1.0', END)\r\n input_data.insert('1.0', code)\r\n set_file_path(path)\r\n\r\n\r\ndef save_as():\r\n if file_path == '':\r\n path = asksaveasfilename(filetypes=[('Python Files', '*.py')])\r\n else:\r\n path = file_path\r\n with open(path, 'w') as file:\r\n code = input_data.get('1.0', END)\r\n file.write(code)\r\n set_file_path(path)\r\n\r\n\r\ndef run():\r\n if file_path == '':\r\n save_prompt = Toplevel()\r\n text = Label(save_prompt, text='Please save your code')\r\n text.pack()\r\n return\r\n command = f'python {file_path}'\r\n process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n output, error = process.communicate()\r\n con.insert('1.0', output)\r\n con.insert('1.0', error)\r\ndef cut():\r\n pag.hotkey(\"ctrl\",\"x\")\r\n\r\ndef copy():\r\n pag.hotkey(\"ctrl\",\"c\")\r\n\r\ndef paste():\r\n pag.hotkey(\"ctrl\",\"v\")\r\n\r\ndef all():\r\n pag.hotkey(\"ctrl\",\"a\")\r\n\r\ndef find():\r\n pag.hotkey(\"ctrl\",\"f\")\r\n\r\ndef color():\r\n mycolor=colorchooser.askcolor(initialcolor='#ff0000')\r\n col=mycolor[1]\r\n print(mycolor)\r\n input_data.config(fg=col)\r\n\r\n\r\ndef action():\r\n file=askopenfilename()\r\n img=cv2.imread(file)\r\n tmsg.showinfo(\"submit\",\"submitted sucessfully\")\r\n config = ('-l eng --oem 1 --psm 3')\r\n # pytessercat\r\n pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files (x86)/Tesseract-OCR/tesseract.exe'\r\n data = pytesseract.image_to_string(img, config=config)\r\n # print text\r\n global data1\r\n print(data)\r\n data1 = data.replace('{} @ ® .$ >>> ','\\n')\r\n input_data.delete(\"0.0\",END)\r\n input_data.insert(0.0,data1)\r\n\r\n\r\ndef font_changed():\r\n 
myfont=askfont(root)\r\n print(myfont)\r\n nFont=(myfont['family'],myfont['size'],myfont['slant'] or myfont['weight'])\r\n print(nFont)\r\n input_data['font']=nFont\r\n\r\ndef Change_theme(x):\r\n if x==1:\r\n input_data.config(bg=\"#262626\",fg=\"white\",font=('consolas 12'),cursor=\"arrow white\")\r\n text_area.config(bg=\"#262626\",fg=\"white\",font=('consolas 12'),cursor=\"arrow white\")\r\n con.config(bg=\"#262626\",fg=\"white\",font=('consolas 12'),cursor=\"arrow white\")\r\n l1.config(bg=\"#808080\",fg=\"white\")\r\n button.config(bg=\"#808080\",fg=\"white\")\r\n\r\n\r\n else:\r\n input_data.configure(background=\"white\",fg=\"black\")\r\n text_area.configure(bg=\"white\",fg=\"black\")\r\n con.configure(bg=\"white\",fg=\"black\")\r\n\r\n\r\n\r\n# Audio Reading\r\nis_playing = False\r\ndef play_pause():\r\n temp=Toplevel()\r\n play_button = Button(temp, text=\"Play\", font=(\"Arial\", 12), command=play_pause, bg=\"green\", fg=\"white\")\r\n play_button.pack(side=LEFT, padx=10, pady=10)\r\n\r\n pause_button = Button(temp, text=\"Pause\", font=(\"Arial\", 12), command=play_pause, bg=\"red\", fg=\"white\")\r\n pause_button.pack(side=LEFT, padx=10, pady=10)\r\n pause_button.config(state=\"disabled\")\r\n global is_playing\r\n if not is_playing:\r\n content = input_data.get(1.0, END)\r\n if content:\r\n is_playing = True\r\n play_button.config(text=\"Pause\")\r\n speech = gTTS(content)\r\n speech.save(\"text.mp3\")\r\n os.system(\"start text.mp3\")\r\n else:\r\n is_playing = False\r\n play_button.config(text=\"Play\")\r\n os.system(\"taskkill /f /im Music.UI.exe\")\r\n\r\n\r\n#Data rendering\r\ndef fetch_wiki_data():\r\n search_query = enter.get()\r\n\r\n try:\r\n result = wiki.summary(search_query)\r\n text_area.delete(1.0, END)\r\n text_area.insert(INSERT, result)\r\n\r\n except wiki.exceptions.DisambiguationError as e:\r\n\r\n # Limiting the number of option for simplicity\r\n options = e.options[:30]\r\n text_area.delete(1.0, END)\r\n text_area.insert(INSERT, f'Please choose one of the option : \\n\\n')\r\n\r\n for i, option in enumerate(options):\r\n text_area.insert(INSERT, f'{i + 1}. 
{option}\\n')\r\n\r\n    except wiki.exceptions.PageError:\r\n        text_area.delete(1.0, END)\r\n        text_area.insert(INSERT, f'No result found for \\'{search_query}\\'.')\r\n\r\n\r\n# Creating Menubar\r\nmenubar = Menu(root,activebackground=\"#98F5FF\")\r\n\r\n# Adding File Menu and commands\r\nfile = Menu(menubar, tearoff=0,bg=\"#98F5FF\")\r\nmenubar.add_cascade(label='File', menu=file)\r\nfile.add_command(label='New File', command=None)\r\nfile.add_command(label='Open...', command=open_file)\r\nfile.add_command(label='Save', command=save_as)\r\nfile.add_separator()\r\nfile.add_command(label='Exit', command=root.destroy)\r\n\r\n# Adding Edit Menu and commands\r\nedit = Menu(menubar, tearoff=0,bg=\"#98F5FF\")\r\nmenubar.add_cascade(label='Edit', menu=edit)\r\nedit.add_command(label='Cut', command=cut)\r\nedit.add_command(label='Copy', command=copy)\r\nedit.add_command(label='Paste', command=paste)\r\nedit.add_command(label='Select All', command=all)\r\nedit.add_separator()\r\nedit.add_command(label='Find...', command=None)\r\n\r\n#Adding run menu\r\nrun_bar = Menu(menubar, tearoff=0,bg=\"#98F5FF\")\r\nrun_bar.add_command(label='Run', command=run)\r\nmenubar.add_cascade(label='Run', menu=run_bar)\r\n\r\n#adjusting properties\r\nlayout_bar = Menu(menubar, tearoff=0,bg=\"#98F5FF\")\r\nlayout_bar.add_command(label='color', command=color)\r\nlayout_bar.add_command(label=\"Fonts\",command=font_changed)\r\nmenubar.add_cascade(label='View', menu=layout_bar)\r\n\r\ntheme= Menu(menubar, tearoff=0,bg=\"#98F5FF\")\r\nmenubar.add_cascade(label='Theme', menu=theme)\r\ntheme.add_radiobutton(label=\"Dark mode\",command=lambda x=1:Change_theme(x))\r\ntheme.add_radiobutton(label=\"White mode\",command=lambda x=2:Change_theme(x))\r\n\r\n#Adding tools\r\ntool_bar = Menu(menubar, tearoff=0,bg=\"#98F5FF\")\r\ntool_bar.add_command(label='Image Processing', command=action)\r\ntool_bar.add_separator()\r\ntool_bar.add_command(label='Data Rendering', command=None)\r\ntool_bar.add_separator()\r\ntool_bar.add_command(label='Audio reading', command=play_pause)\r\nmenubar.add_cascade(label='Tools', menu=tool_bar)\r\n\r\n\r\n# Adding Help Menu\r\nhelp_ = Menu(menubar, tearoff=0,bg=\"#98F5FF\")\r\nmenubar.add_cascade(label='Help', menu=help_)\r\nhelp_.add_command(label='Tk Help', command=None)\r\nhelp_.add_command(label='Demo', command=None)\r\nhelp_.add_separator()\r\nhelp_.add_command(label='About Tk', command=None)\r\n\r\n\r\n\r\n# display Menu\r\nroot.config(menu=menubar)\r\n\r\npane=PanedWindow(root,width=500)\r\npane.pack(fill=\"both\",expand=1,side=LEFT)\r\n\r\ninput_data=scrolledtext.ScrolledText(pane,bd=7)\r\npane.add(input_data)\r\n\r\n\r\npane1=PanedWindow(pane,orient=VERTICAL,width=200)\r\npane.add(pane1)\r\n\r\nl1=Label(pane1,text=\"Console\")\r\npane1.add(l1)\r\n\r\ncon=Text(pane1,height=10)\r\npane1.add(con)\r\n\r\nlabel = Label(pane1, text = 'Enter search query : ')\r\npane1.add(label)\r\n\r\nenter = Entry(pane1)\r\npane1.add(enter)\r\n\r\n# Create a button to fetch Wikipedia data\r\nbutton=Button(pane1, text = 'Fetch Data',bd=5, command = fetch_wiki_data)\r\npane1.add(button)\r\n\r\n# Create a scrolled text widget to display the fetched data\r\n# Text widgets take width/height (in characters/lines); x/y are not valid options\r\ntext_area = scrolledtext.ScrolledText(pane1, width = 80, height = 20)\r\npane1.add(text_area)\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"arihant0907/Integrated_editor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"33417716248","text":"from flask import Flask\nfrom flask_restful import Api\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nfrom models.db import db\nfrom models.user import User\nfrom models.post import Post\nfrom models.comment import Comment\nfrom resources import user, auth, comment, post\n\napp = Flask(__name__)\nCORS(app)\napi = Api(app)\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://localhost:5432/trioblog_db\"\napp.config['SQLALCHEMY_ECHO'] = True\n\ndb.init_app(app)\nmigrate = Migrate(app, db)\n\n# auth\napi.add_resource(auth.Login, '/user/login')\napi.add_resource(auth.Register, '/user/register')\n# User Routes\napi.add_resource(user.UsersDetail, '/user/<string:user_id>')\napi.add_resource(user.AllUsers, '/users')\n# Post Routes\napi.add_resource(post.UserPosts, '/user/posts/<string:user_name>')\napi.add_resource(post.PostId, '/post/<string:post_id>')\napi.add_resource(post.Posts, '/posts')\n# Comment Routes\napi.add_resource(comment.PostComments, '/post/comments/<string:post_id>')\napi.add_resource(comment.UserComments, '/user/comments/<string:user_id>')\napi.add_resource(comment.Comments, '/comments')\napi.add_resource(comment.CommentId, '/comments/<string:comment_id>')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"TrioBlog/TrioBlog","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8196650064","text":"import unittest\n\nfrom lib.config import Config\nfrom lib.parser.kotlin.kotlin import KotlinParser\nfrom model.component import Component\n\n\nclass KotlinParserTest(unittest.TestCase):\n\n def test_should_recognize_abstraction(self):\n parser = KotlinParser(Config(\n \".kt\",\n [],\n [],\n []\n ))\n with open('./fixtures/ReportDataSource.kt', 'r') as file:\n code = file.read()\n result = parser.is_abstraction(code)\n\n self.assertTrue(result)\n","repo_name":"MrIceman/spyder","sub_path":"lib/parser/kotlin/kotlin_test.py","file_name":"kotlin_test.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6148083171","text":"import numpy as np\nimport nibabel as nib\nimport ants\nimport SimpleITK as sitk\nimport itk as itk\n\n\ndef dice(segm_temp, segm_ref):\n \"\"\"One evaluation is with DICE:\n 2 * sum(union(Temp_segm, Ref_segm)) / (sum(Temp_segm) + sum(Ref_segm))\n \"\"\"\n segm_types = np.asarray([1, 2, 3, 4]) # assumed we have these four segm labels\n\n dice_scores = []\n for i in range(segm_types.size):\n k = segm_types[i]\n t = np.copy(segm_temp)\n r = np.copy(segm_ref)\n\n t[t != k] = 0\n r[r != k] = 0\n\n if not np.any(t):\n if not np.any(r):\n dice = None\n weight = 0\n else:\n dice = 2 * np.sum(np.multiply(t, r)/k**2) / (np.sum(t)/k + np.sum(r)/k + 0.000001)\n # weight = 1\n elif not np.any(r):\n dice = 2 * np.sum(np.multiply(t, r)/k**2) / (np.sum(t)/k + np.sum(r)/k + 0.000001)\n # weight = 1\n else:\n dice = 2 * np.sum(np.multiply(t, r)/k**2) / (np.sum(t)/k + np.sum(r)/k + 0.000001)\n # weight = 2\n dice_scores.append(dice)\n return dice_scores\n\n\ndef calculate_dice_values(seg_temp_dir, seg_ref_dir, def_field, framework, save_dir=None, case=None, slicewise=False):\n \"\"\"calculates dice values by first applying the deformation on the \n segmentation masks and then calculating the dice-overlap of all (4) segms.\n \n 
Depending on the registration framework, different steps are required.\n    \"\"\"\n    if framework == 'ANTs':\n        seg_t = nib.load(seg_temp_dir)\n        seg_t = np.round(seg_t.get_data().astype(float))\n        seg_r = nib.load(seg_ref_dir)\n        seg_r = np.round(seg_r.get_data().astype(float))\n\n        seg_t_moved = ants.apply_transforms(ants.from_numpy(seg_r), ants.from_numpy(seg_t), def_field, interpolator='genericLabel')\n        seg_t_moved = np.round(seg_t_moved.numpy()) # need natural numbers\n\n        if save_dir:\n            name = seg_temp_dir.rsplit(sep='/', maxsplit=1)[-1]\n            ants.image_write(ants.from_numpy(seg_t_moved), save_dir + '/' + '_' + \"moved_\" + name)\n\n    elif framework == 'corrfield':\n        seg_r = sitk.ReadImage(seg_ref_dir, sitk.sitkFloat32)\n        seg_r.SetDirection([1, 0, 0, 0, 1, 0, 0, 0, 1])\n        seg_t = sitk.ReadImage(seg_temp_dir, sitk.sitkFloat32)\n        seg_t.SetDirection([1, 0, 0, 0, 1, 0, 0, 0, 1])\n\n        resampler = sitk.ResampleImageFilter()\n        resampler.SetReferenceImage(seg_r)\n        resampler.SetInterpolator(sitk.sitkNearestNeighbor)\n        resampler.SetDefaultPixelValue(0.0)\n        resampler.SetTransform(def_field)\n        seg_t_moved = resampler.Execute(seg_t)\n        seg_t_moved.SetDirection([-1, 0, 0, 0, -1, 0, 0, 0, 1])\n        seg_t_moved.SetOrigin([0, 0, 0])\n\n        if save_dir:\n            name = seg_temp_dir.rsplit(sep='/', maxsplit=1)[-1]\n            sitk.WriteImage(seg_t_moved, save_dir + '/' + case + '_' + \"moved_\" + name)\n        \n        seg_t_moved = sitk.GetArrayFromImage(seg_t_moved).transpose(1, 2, 0)\n        seg_r = sitk.GetArrayFromImage(seg_r).transpose(1, 2, 0)\n\n    elif framework == 'elastix':\n        seg_moving = itk.imread(seg_temp_dir, itk.F)\n        seg_r = itk.imread(seg_ref_dir, itk.F)\n\n        transform_map = def_field.GetParameterMap(0)\n        transform_map['ResampleInterpolator'] = [\"FinalNearestNeighborInterpolator\"]\n\n        parameter_new = itk.ParameterObject.New()\n        parameter_new.SetParameterMap(transform_map)\n\n        seg_t_moved = itk.transformix_filter(\n            seg_moving,\n            parameter_new)\n\n        if save_dir:\n            name = seg_temp_dir.rsplit(sep='/', maxsplit=1)[-1]\n            itk.imwrite(seg_t_moved, save_dir + '/' + case + '_' + \"moved_\" + name)\n\n        seg_t_moved = itk.GetArrayFromImage(seg_t_moved).transpose(1, 2, 0)\n        seg_r = itk.GetArrayFromImage(seg_r).transpose(1, 2, 0)\n\n    if slicewise:\n        dice_vals = []\n        for i in range(seg_t_moved.shape[-1]):\n            dice_vals.append(dice(seg_t_moved[:, :, i], seg_r[:, :, i]))\n        return dice_vals\n    else:\n        return dice(seg_t_moved, seg_r)\n\n\ndef calculate_initial_dice_values(seg_temp_dir, seg_ref_dir, slicewise=False):\n    seg_t = nib.load(seg_temp_dir)\n    seg_t = np.round(seg_t.get_data().astype(float))\n    seg_r = nib.load(seg_ref_dir)\n    seg_r = np.round(seg_r.get_data().astype(float))\n\n    if slicewise:\n        dice_vals = []\n        for i in range(seg_t.shape[-1]):\n            dice_vals.append(dice(seg_t[:, :, i], seg_r[:, :, i]))\n        return dice_vals\n    else:\n        return dice(seg_t, seg_r)\n","repo_name":"nilsFrohwitter/I2I-Synthesis","sub_path":"evaluation/eval_reg.py","file_name":"eval_reg.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27784654409","text":"import cv2\nimport imutils\nimport numpy as np\nfrom google.cloud import vision\n\ndef align_images(image, template):\n    # convert both the input image and template to grayscale\n    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    templateGray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n    \n    # use SIFT to detect keypoints and extract local\n    # invariant features\n    sift = cv2.SIFT_create()\n    keypoints1, descriptors1 = 
sift.detectAndCompute(imageGray, None)\n    keypoints2, descriptors2 = sift.detectAndCompute(templateGray, None)\n    \n    # match the features\n    bf = cv2.BFMatcher()\n    matches = bf.knnMatch(descriptors1, descriptors2, k=2)\n    \n    # keep only distinctive matches via Lowe's ratio test (the smaller\n    # the distance, the \"more similar\" the features are)\n    good_matches = []\n    for m, n in matches:\n        if m.distance < 0.6 * n.distance:\n            good_matches.append([m])\n    # check to see if we should visualize the matched keypoints\n    matchedVis = cv2.drawMatchesKnn(image, keypoints1,template, keypoints2, good_matches,None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n    matchedVis = imutils.resize(matchedVis, width=1000)\n    points1 = np.float32([keypoints1[m[0].queryIdx].pt for m in good_matches])\n    points2 = np.float32([keypoints2[m[0].trainIdx].pt for m in good_matches])\n\n    (h, mask) = cv2.findHomography(points1, points2, method=cv2.RANSAC)\n    height, width, channels = template.shape\n    aligned = cv2.warpPerspective(image, h, (width, height))\n    return aligned\n\ndef ocr(card_image):\n    client = vision.ImageAnnotatorClient()\n    image = vision.Image(content=card_image)\n    response = client.text_detection(image=image)\n    texts = response.text_annotations\n    return texts\n","repo_name":"ingchoff/ocr-card-backend","sub_path":"utils/ImageProcess.py","file_name":"ImageProcess.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70950141386","text":"flagList = [0,1]\nglobCounter = 0\ndrawing = flagList[0]\nmode, connect = True, False\ndrawPath = [] # x ,y, color, size\npointsRectangle = [] # points for drawing the rectangle\n\nwindowWidth, windowHeight = 500, 300\n# sequence: hMin, sMin, vMin, hMax, sMax, vMax\ndeepGreen = [55,79,0,117,255,255]\nlightRed = [78,133,104,179,255,255]\ndeepBlue = [47, 141, 0, 124, 186, 238]\ncolorList = [deepGreen, lightRed]\n\n# Corresponding colors for drawing\ngreen = [0,255,0]\nred = [0,0,255]\nblue = [255,0,0]\ncolorDrawing = [green, red]","repo_name":"Awekabaz/virtualDrawing","sub_path":"atrs.py","file_name":"atrs.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18460955695","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 19 08:57:26 2022\n\n@author: Sofie\n\"\"\"\n# Import the required packages\nimport numpy as np\nimport nibabel as nib\nimport os\n\n# Set the directory to read the files from, and the new output directory\ndirectory = 'data/image_data/ous/tumor_ds/ct'\nnew_directory = 'data/image_data/ous/tumor_ds/ct_windowed'\n \n# Define window width and window level for the CT image\ncenter = 60\nwidth = 350\n\n\n# Iterate over all the images in a for loop and save the preprocessed\n# images to a new folder.\n\nfor filename in os.listdir(directory):\n    f = os.path.join(directory, filename)\n    if os.path.isfile(f):\n        img = nib.load(directory+'/'+filename)\n        data = img.
get_data()\n        data = data-1024 # Convert to Hounsfield units (HU)\n        \n        data=np.where(data >= width/2+center, width/2+center, data)\n        data=np.where(data<=-width/2+center, -width/2+center, data)\n        ct_windowed = data+ width/2-center\n        new_image = nib.Nifti1Image(ct_windowed, affine=np.eye(4))\n        nib.save(new_image, os.path.join(new_directory, filename))\n","repo_name":"sofiefj/Bellossom","sub_path":"Preprosessering/CT-windowing.py","file_name":"CT-windowing.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11751264489","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution(object):\n    def sumOfLeftLeaves(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        return self.sumOfLeftLeavesRecursive(root, False)\n\n    def sumOfLeftLeavesRecursive(self, node, is_left):\n        if node is None:\n            return 0\n        else:\n            if is_left and node.left is None and node.right is None:\n                sum = node.val\n            else:\n                sum = 0\n            sum += self.sumOfLeftLeavesRecursive(node.left, True)\n            sum += self.sumOfLeftLeavesRecursive(node.right, False)\n            return sum","repo_name":"fanzeng/Leetcode","sub_path":"404/404_Sum_of_Left_Leaves.py","file_name":"404_Sum_of_Left_Leaves.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11185533283","text":"__author__ = 'wwh'\n\n# From an iterable object, extract the specified elements\n\n# Function that computes the average\ndef avg(mid):\n    sum = 0\n    for i in range(len(mid)):\n        print(i,mid[i])\n        sum += mid[i]\n    return sum/(i+1)\n\n# Drop the first and last elements, then average the rest\ndef drop_first_last(grades):\n    # a single * packs variable positional arguments into a tuple\n    # a double ** packs variable keyword arguments (key-value pairs)\n    first, *middle, last = grades\n    print(type(middle))\n    return avg(middle)\n\nif __name__ == '__main__':\n    # any iterable can be unpacked into individual variables\n    t = (1,2,3)\n    x,y,z = t\n    print(x,y,z)\n    s = 'hello'\n    a,b,c,d,e = s\n    print(a,b,c,d,e)\n    # given a set of grades, compute their average\n    student_grade = [88,99,100,65,78,66]\n    print(drop_first_last(student_grade))\n","repo_name":"wangweihao/PythonCookbook","sub_path":"decopose.py","file_name":"decopose.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24657168864","text":"from __future__ import division\nimport numpy as np\nfrom milk.unsupervised.kmeans import select_best_kmeans, assign_centroids\nfrom .base import supervised_model, base_adaptor\nimport multiprocessing\nfrom milk.utils import parallel\nfrom milk import defaultlearner\n\nclass precluster_model(supervised_model):\n    def __init__(self, centroids, base):\n        self.centroids = centroids\n        self.base = base\n        self.normalise = True\n\n    def apply(self, features):\n        histogram = assign_centroids(features, self.centroids, histogram=True, normalise=self.normalise)\n        return self.base.apply(histogram)\n\n\nclass precluster_learner(object):\n    '''\n    This learns a classifier by clustering the input features\n    '''\n    def __init__(self, ks, base=None, R=None):\n        if base is None:\n            base = defaultlearner()\n        self.ks = ks\n        self.R = R\n        self.base = base\n        self.normalise = True\n\n    def set_option(self, k, v):\n        if k in ('R', 'ks'):\n            setattr(self, k, v)\n        else:\n            self.base.set_option(k,v)\n\n    def train(self, features, labels, **kwargs):\n        allfeatures = np.vstack(features)\n        assignments, centroids = select_best_kmeans(allfeatures, self.ks, repeats=1, method=\"AIC\", R=self.R)\n        
histograms = [assign_centroids(f, centroids, histogram=True, normalise=self.normalise) for f in features]\n        base_model = self.base.train(histograms, labels, **kwargs)\n        return precluster_model(centroids, base_model)\n\nclass codebook_model(supervised_model):\n    def __init__(self, centroids, base, normalise):\n        self.centroids = centroids\n        self.base = base\n        self.normalise = normalise\n\n    def apply(self, features):\n        from milk.unsupervised.kmeans import assign_centroids\n        f0,f1 = features\n        features = assign_centroids(f0, self.centroids, histogram=True, normalise=self.normalise)\n        if f1 is not None and len(f1):\n            features = np.concatenate((features, f1))\n        return self.base.apply(features)\n\n\nclass codebook_learner(base_adaptor):\n    def set_option(self, k, v):\n        if k != 'codebook':\n            raise KeyError('milk.precluster.codebook_learner: unknown option `%s`' % k)\n        self.codebook = v\n\n    def train(self, features, labels, **kwargs):\n        from milk.unsupervised.kmeans import assign_centroids\n        tfeatures = np.array([ assign_centroids(f, self.codebook, histogram=True, normalise=self.normalise)\n                                for f,_ in features])\n        tfeatures = np.hstack((tfeatures, np.array([f for _,f in features])))\n        base_model = self.base.train(tfeatures, labels, **kwargs)\n        return codebook_model(self.codebook, base_model, self.normalise)\n\nclass kmeans_cluster(multiprocessing.Process):\n    def __init__(self, features, inq, outq):\n        multiprocessing.Process.__init__(self)  # initialize Process machinery so .start() works\n        self.features = features\n        self.inq = inq\n        self.outq = outq\n\n    def execute(self):\n        import milk\n        while True:\n            k,ri = self.inq.get()\n            if k == 'shutdown':\n                return\n            _,centroids = milk.kmeans(self.features, k=k, R=(k*1024+ri))\n            self.outq.put(centroids)\n\n    def run(self):\n        try:\n            self.execute()\n        except Exception as e:\n            import traceback  # needed for format_exc below\n            errstr = r'''\\\nError in milk.supervised.precluster.learn_codebook internal\n\nException was: %s\n\nOriginal Traceback:\n%s\n\n(Since this was run on a different process, this is not a real stack trace).\n''' % (e, traceback.format_exc())\n            self.outq.put( ('error', errstr) )\n\n\nclass select_precluster(object):\n\n    def __init__(self, ks, base, normalise=True, rmax=16):\n        self.base = base\n        self.ks = ks\n        self.rmax = rmax\n        self.sample = 16\n        self.nfolds = 5\n        self.normalise = normalise\n\n    def train(self, features, labels, **kwargs):\n        from milk.supervised.gridsearch import gridminimise\n        c_features = np.concatenate([f for f,_ in features if len(f)])\n        c_features = c_features[::self.sample]\n        nprocs = parallel.get_procs(use_current=True)\n        tow = multiprocessing.Queue()\n        fromw = multiprocessing.Queue()\n        for k in self.ks:\n            for ri in range(self.rmax):\n                tow.put((k,ri))\n        for i in range(nprocs):\n            tow.put(('shutdown',None))\n        workers = [kmeans_cluster(c_features, tow, fromw) for i in range(nprocs)]\n        for w in workers:\n            if nprocs > 1:\n                w.start()\n            else:\n                w.execute()\n        try:\n            codebooks = [fromw.get() for i in range(len(self.ks)*self.rmax)]\n        finally:\n            tow.close()\n            tow.join_thread()\n            if nprocs > 1:\n                for w in workers:\n                    w.join()\n            parallel.release_procs(len(workers), count_current=True)\n\n        base = codebook_learner(self.base)\n        base.normalise = self.normalise\n        if len(codebooks) > 1:\n            (best,) = gridminimise(base, features, labels, { 'codebook' : codebooks }, nfolds=self.nfolds)\n            _,codebook = best\n        else:\n            (codebook,) = codebooks\n        base.codebook = codebook\n        return base.train(features, labels)\n\nclass frac_precluster_learner(object):\n\n    def __init__(self, k=None, kfrac=None, sample=16):\n        self.k = k\n        self.kfrac = kfrac\n        self.sample = sample\n\n    def train(self, features, 
labels, R=134, **kwargs):\n        import milk  # for milk.kmeans and milk.defaultlearner (same local-import pattern as kmeans_cluster.execute)\n        c_features = np.concatenate([f for f,_ in features if f.size])\n        c_features = c_features[::self.sample]\n\n        learner = milk.defaultlearner()\n        k = (self.k if self.k is not None else len(features)//self.kfrac)\n        _,codebook = milk.kmeans(c_features, k=k, R=R)\n        # project onto the learned codebook the same way codebook_learner.train does\n        tfeatures = np.array([assign_centroids(f, codebook, histogram=True, normalise=True)\n                                for f,_ in features])\n        tfeatures = np.hstack((tfeatures, np.array([f for _,f in features])))\n        model = learner.train(tfeatures, labels)\n        return codebook_model(codebook, model, normalise=True)\n\n","repo_name":"luispedro/milk","sub_path":"milk/supervised/precluster.py","file_name":"precluster.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","stars":610,"dataset":"github-code","pt":"81"} +{"seq_id":"872250910","text":"sub_lists = []\nlist1 = ['3', '3', 'x', '^', '3', '+', '2', '2', 'x', '^', '1', '+', '1']\ncurrent_start = 0\ni = 0\nwhile i < len(list1):\n    if list1[i]==\"+\":\n        sub_lists.append(list1[current_start:i])\n        current_start = i+1\n    i+=1\n# append the final term after the last '+'\nsub_lists.append(list1[current_start:])\nprint(sub_lists)\n    \n","repo_name":"wjaneal/ICS3U","sub_path":"JAKA/Jeffry/rouph.py","file_name":"rouph.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42016882288","text":"import spi\ns = spi.SPI('/dev/spidev32765.0', 0, 100000)\nfrom bitarray import bitarray\n\nfrom statistics import mode, mean, StatisticsError\n\nimport json\nimport time\nimport itertools\n\nprinter = lambda xs: ''.join([{0: '░', 1: '█', 2: '╳'}[x] for x in xs])\ndebinary = lambda ba: sum([x*(2**i) for (i,x) in enumerate(reversed(ba))])\n\nimport tsd_client\n\nilen = lambda it: sum(1 for _ in it)\nrle = lambda xs: ((ilen(gp), x) for x, gp in itertools.groupby(xs))\nrld = lambda xs: itertools.chain.from_iterable(itertools.repeat(x, n) for n, x in xs)\n\nclass PacketBase(object):\n    def __init__(self, packet = [], errors = None, deciles = {}, raw = []):\n        self.packet = packet\n        self.errors = errors\n        self.deciles = deciles\n        self.raw = raw\n\ndef get_decile_durations(pulses):\n    values = set([value for (width, value) in pulses])\n    deciles = {}\n    if len(pulses) < 10:\n        return None\n    for value in sorted(list(values)):\n        counts = sorted([width for (width, x) in pulses if x == value])\n        tenth = len(counts) // 10\n        if not tenth:\n            return None\n        short_decile = int(mean(counts[1*tenth:2*tenth]))\n        long_decile = int(mean(counts[8*tenth:9*tenth]))\n        deciles[value] = (short_decile, long_decile)\n    return deciles\n\ndef find_pulse_groups(pulses, deciles):\n    # find segments of quiet that are 9x longer than the short period\n    # this is naive, if a trivial pulse width encoding is used, any sequence of 9 or more short sequential silences will be read as a packet break\n    breaks = [i[0] for i in enumerate(pulses) if (i[1][0] > min(deciles[0][0],deciles[1][0]) * 9) and (i[1][1] == False)]\n    # find periodicity of the packets\n    break_deltas = [y-x for (x,y) in zip(breaks, breaks[1::])]\n    if len(break_deltas) < 2:\n        return None\n    # ignoring few-pulse packets, if you have more than three different fragment sizes, try to regularize\n    elif len(set([bd for bd in break_deltas if bd > 3])) > 3:\n        try:\n            d_mode = mode(break_deltas)\n            # if all values different, use mean as mode\n        except StatisticsError:\n            d_mode = round(mean(break_deltas))\n        # determine expected periodicity of packet widths\n        breaks2 = [x*d_mode for x in range(round(max(breaks) // d_mode))]\n        if len(breaks2) < 2:\n            return None\n        # discard breaks 
more than 10% from expected position\n breaks = [x for x in breaks if True in [abs(x-y) < breaks2[1]//10 for y in breaks2]]\n # define packet pulses as the segment between breaks\n return breaks\n\ndef demodulator(pulses):\n packets = []\n # drop short (clearly erroneous, spurious) pulses\n pulses = [x for x in rle(rld([x for x in pulses if x[0] > 2]))]\n deciles = get_decile_durations(pulses)\n if not deciles:\n return packets\n breaks = find_pulse_groups(pulses, deciles)\n if not breaks:\n return packets\n for (x,y) in zip(breaks, breaks[1::]):\n packet = pulses[x+1:y]\n pb = []\n errors = []\n # iterate over packet pulses\n for chip in packet:\n valid = False\n for v in deciles.keys():\n for (i, width) in enumerate(deciles[v]):\n if (not valid) and (chip[1] == v) and (abs(chip[0] - width) < width // 2):\n pb += [v]*(i+1)\n valid = True\n if not valid:\n errors += [chip]\n pb += [2]\n if len(pb) > 4:\n result = PacketBase(pb, errors, deciles, pulses[x:y])\n packets.append(result)\n return packets\n\nba = bitarray(endian='big')\n\ndef silver_sensor(packet):\n if packet.errors == []:\n bits = [x[0] == 2 for x in rle(packet.packet) if x[1] == 0]\n # some thanks to http://forum.iobroker.net/viewtopic.php?t=3818\n # \"TTTT=Binär in Dez., Dez als HEX, HEX in Dez umwandeln (zB 0010=2Dez, 2Dez=2 Hex) 0010=2 1001=9 0110=6 => 692 HEX = 1682 Dez = >1+6= 7 UND 82 = 782°F\"\n if len(bits) == 42:\n fields = [0,2,8,2,2,4,4,4,4,4,8]\n fields = [x for x in itertools.accumulate(fields)]\n results = [debinary(bits[x:y]) for (x,y) in zip(fields, fields[1:])]\n # uid is never 0xff, but similar protocols sometimes decode with this field as 0xFF\n if results[1] == 255:\n return None\n temp = (16**2*results[6]+16*results[5]+results[4])\n humidity = (16*results[8]+results[7])\n if temp > 1000:\n temp %= 1000\n temp += 100\n temp /= 10\n temp -= 32\n temp *= 5/9\n return {'uid':results[1], 'temperature': temp, 'humidity': humidity, 'channel':results[3], 'metameta': packet.__dict__}\n return None\n\n# block size\nbs = 32768\n\nlast = {}\nwhile True:\n p = s.transfer([0]*bs)\n # if input values are all-high or all-low\n ba = bitarray(endian='big')\n ba.frombytes(bytes(p))\n pulses = [(w,v*1) for (w,v) in rle(ba)]\n if len(pulses) > 10:\n current_time = time.time()\n for packet in demodulator(pulses):\n print(printer(packet.packet))\n send_and_update = False\n res = silver_sensor(packet)\n if res is not None:\n uid = res['uid']\n else:\n uid = None\n if (uid in last.keys()) and ((time.time() - last[uid]) > 30):\n send_and_update = True\n else:\n last[uid] = time.time()\n if (res is not None) and send_and_update:\n last[uid] = time.time()\n tsd_client.log(res)\n","repo_name":"itdaniher/python-spirit1","sub_path":"packetizer.py","file_name":"packetizer.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74794722505","text":"class Node:\r\n def __init__(self, val, start, end, left=None, right=None):\r\n self.val = val\r\n self.start = start\r\n self.end = end\r\n self.left = left\r\n self.right = right\r\n\r\nclass segmental_tree:\r\n def __init__(self, nums):\r\n self.root = self.build(nums, 0, len(nums)-1)\r\n \r\n def build(self, nums, start, end):\r\n if start == end:\r\n return Node(nums[start], start, end)\r\n \r\n mid = (start + end) // 2\r\n left = self.build(nums, start, mid)\r\n right = self.build(nums, mid+1, end)\r\n \r\n value = 0\r\n if left:\r\n value += left.val\r\n if right:\r\n value += 
right.val\r\n        \r\n        return Node(value, start, end, left, right)\r\n    \r\n    def update(self, index, value):\r\n        self._update(self.root, index, index, value)\r\n    \r\n    def query(self, left, right):\r\n        return self._query(self.root, left, right)\r\n    \r\n    def _update(self, root, start, end, value):\r\n        if start > end:\r\n            return\r\n        \r\n        if root.start == start and root.end == end:\r\n            root.val = value\r\n            return\r\n        \r\n        mid = (root.start + root.end) // 2\r\n        if end <= mid:\r\n            self._update(root.left, start, end, value)\r\n        elif mid < start:\r\n            self._update(root.right, start, end, value)\r\n        else:\r\n            self._update(root.left, start, mid, value)\r\n            self._update(root.right, mid+1, end, value)\r\n        \r\n        root.val = 0\r\n        if root.left:\r\n            root.val += root.left.val\r\n        if root.right:\r\n            root.val += root.right.val\r\n    \r\n    def _query(self, root, start, end):\r\n        if start > end:\r\n            return 0\r\n        \r\n        if root.start == start and root.end == end:\r\n            return root.val\r\n        \r\n        mid = (root.start + root.end) // 2\r\n        if end <= mid:\r\n            return self._query(root.left, start, end)\r\n        elif mid < start:\r\n            return self._query(root.right, start, end)\r\n        else:\r\n            return self._query(root.left, start, mid) + self._query(root.right, mid+1, end)\r\n\r\nclass NumArray:\r\n\r\n    def __init__(self, nums: List[int]):\r\n        self.segTree = segmental_tree(nums)\r\n\r\n    def update(self, index: int, val: int) -> None:\r\n        self.segTree.update(index, val)\r\n\r\n    def sumRange(self, left: int, right: int) -> int:\r\n        return self.segTree.query(left, right)\r\n\r\n\r\n# Your NumArray object will be instantiated and called as such:\r\n# obj = NumArray(nums)\r\n# obj.update(index,val)\r\n# param_2 = obj.sumRange(left,right)","repo_name":"novayo/LeetCode","sub_path":"0307_Range_Sum_Query_-_Mutable/try_4.py","file_name":"try_4.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"25826955796","text":"import numpy as np\nimport pandas as pd\nimport jieba\n\n# http://blog.csdn.net/eastmount/article/details/50323063\n# http://blog.csdn.net/eastmount/article/details/50256163\n# http://blog.csdn.net/lsldd/article/details/41542107\n\"\"\"Public opinion sentiment analysis\"\"\"\n####################################\n# Step 1: read the data and tokenize\n#\ndata = pd.read_excel(\"D:\\\\project\\\\datawj\\\\pinglun.xlsx\", encoding='gbk')\nprint(data)\n\n# take all values of the first column of the table\nprint(\"Fetching the first column\")\ncol = data.iloc[:, 0]\n\n# take all values of the table\narrs = col.values\n\n# remove stop words\nstopwords = {}.fromkeys([',', '。', '!', '这', '我', '非常'])\n# with open('stopwords.txt','r') as f:\n#     stopwords = f.read()\nprint(u\"\\nResults after Chinese word segmentation:\")\ncorpus = []\nfor a in arrs:\n    # print a\n    seglist = jieba.cut(a, cut_all=False) # precise mode\n    final = ''\n    for seg in seglist:\n        # seg = seg.encode('utf-8')\n        if seg not in stopwords: # keep tokens that are not stop words\n            final += seg\n    seg_list = jieba.cut(final, cut_all=False)\n    output = ' '.join(list(seg_list)) # join with spaces\n    print(output)\n    corpus.append(output)\nprint(corpus)\n\n####################################\n# Step 2: compute term frequencies\n#\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.linear_model import LogisticRegression\n\nvectorizer = CountVectorizer() # convert the texts into a term-frequency matrix\nX = vectorizer.fit_transform(corpus) # count how often each term occurs\nword = vectorizer.get_feature_names() # get all text keywords in the bag of words\nfor w in word: # inspect the term-frequency results\n    print(w)\nprint('')\nprint(X.toarray())\n\n####################################\n# Step 3: data analysis\n#\nfrom sklearn.naive_bayes import MultinomialNB\nfrom 
sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import classification_report\n\n# train on the first 8 rows of the dataset; the last two rows are used for prediction\nprint(u\"\\n\\nData analysis:\")\nX = X.toarray()\nx_train = X[:8]\nx_test = X[8:]\n# 1 means positive review, 0 means negative review\ny_train = [1, 1, 1, 0, 1, 0, 0, 1]\ny_test = [0, 0]\n# fit a LogisticRegression classifier (MultinomialNB, imported above, is an alternative)\nclf = LogisticRegression().fit(x_train, y_train)\npre = clf.predict(x_test)\nprint(u\"Predicted results:\", pre)\nprint(u\"Actual results:\", y_test)\n\nprint(classification_report(y_test, pre))\n","repo_name":"AdDantes/ML","sub_path":"datawj/yuqing.py","file_name":"yuqing.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12120402347","text":"\nfrom deepSI.systems.system import System, System_io, System_data, load_system\nimport numpy as np\nfrom deepSI.datasets import get_work_dirs\nimport deepSI\nimport torch\nfrom torch import nn, optim\nfrom tqdm.auto import tqdm\nimport time\nfrom pathlib import Path\nimport os.path\nfrom torch.utils.data import Dataset, DataLoader\nimport itertools\nfrom copy import deepcopy\nimport warnings\n\nclass System_fittable(System):\n    \"\"\"Subclass of system which introduces a .fit method which calls ._fit to fit the system\n\n    Notes\n    -----\n    This function will automatically fit the normalization in self.norm if auto_fit_norm is set to True (default). \n    Lastly it will set self.init_model_done to True which will keep the norm constant. \n    \"\"\"\n    def init_model(self, sys_data=None, nu=-1, ny=-1, auto_fit_norm=True):\n        if auto_fit_norm: #if the norm is not used you can also manually initialize it.\n            #you may consider not using the norm if you have constant values in your training data which can change. They are known to cause quite a number of bugs and errors. 
\n self.norm.fit(sys_data)\n self.nu = sys_data.nu\n self.ny = sys_data.ny\n self.init_model_done = True\n\n def fit(self, train_sys_data, auto_fit_norm=True, **kwargs):\n if self.init_model_done==False:\n self.init_model(train_sys_data, auto_fit_norm=auto_fit_norm) \n self._fit(self.norm.transform(train_sys_data), **kwargs)\n\n def _fit(self, normed_sys_data, **kwargs):\n raise NotImplementedError('_fit or fit should be implemented in subclass')\n\nclass System_torch(System_fittable):\n '''For systems that utilize torch\n\n Attributes\n ----------\n parameters : list\n The list of fittable network parameters returned by System_torch.init_nets(nu,ny)\n optimizer : torch.Optimizer\n The main optimizer returned by System_torch.init_optimizer\n time : numpy.ndarray\n Current runtime after each epoch\n batch_id : numpy.ndarray\n Current total number of batch optimization steps is saved after each epoch\n Loss_train : numpy.ndarray\n Average training loss for each epoch\n Loss_val : numpy.ndarray\n Validation loss for each epoch\n\n Notes\n -----\n subclasses should define three methods\n (i) init_nets(nu, ny) which returns the network parameters, \n (ii) make_training_data(sys_data, **loss_kwargs)` which converts the normed sys_data into training data (list of numpy arrays),\n (iii) loss(*training_data, **loss_kwargs) which returns the loss using the current training data\n '''\n def init_nets(self, nu, ny):\n '''Defined in subclass and initializes networks and returns the parameters\n\n Parameters\n ----------\n nu : None, int or tuple\n The shape of the input u\n ny : None, int or tuple\n The shape of the output y\n '''\n raise NotImplementedError('init_nets should be implemented in subclass')\n\n # def make_training_data(self, sys_data, **loss_kwargs):\n # '''Defined in subclass which converts the normed sys_data into training data\n\n # Parameters\n # ----------\n # sys_data : System_data or System_data_list\n # Already normalized\n # loss_kwargs : dict\n # loss function settings passed into .fit\n\n # Returns\n # -------\n # data : list or torch.utils.data.Dataset\n # a list of arrays (e.g. 
[X,Y]) or an instance of torch.utils.data.Dataset\n    #     '''\n    #     assert sys_data.normed == True\n    #     raise NotImplementedError('make_training_data should be implemented in subclass')\n\n    def loss(self, *training_data_batch, **loss_kwargs):\n        '''Defined in subclass which takes the batch data and calculates the loss based on loss_kwargs\n\n        Parameters\n        ----------\n        training_data_batch : list\n            batch of the training data returned by make_training_data and converted to torch arrays\n        loss_kwargs : dict\n            loss function settings passed into .fit\n        '''\n        raise NotImplementedError('loss should be implemented in subclass')\n\n    def init_optimizer(self, parameters, **optimizer_kwargs):\n        '''Optionally defined in subclass to create the optimizer\n\n        Parameters\n        ----------\n        parameters : list or list of dict\n            system torch parameters\n        optimizer_kwargs : dict\n            If 'optimizer' is defined then that optimizer will be used, otherwise Adam will be used.\n            The other parameters will be passed to the optimizer as a kwarg.\n        '''\n        if optimizer_kwargs.get('optimizer') is not None:\n            optimizer_kwargs = deepcopy(optimizer_kwargs) #do not modify the original kwargs\n            optimizer = optimizer_kwargs['optimizer']\n            del optimizer_kwargs['optimizer']\n        else:\n            optimizer = torch.optim.Adam\n        return optimizer(parameters,**optimizer_kwargs)\n\n    def init_scheduler(self, **scheduler_kwargs):\n        '''Optionally defined in subclass to create the scheduler\n\n        Parameters\n        ----------\n        scheduler_kwargs : dict\n            If 'scheduler' is defined then that scheduler will be used, otherwise no scheduler will be used.\n        '''\n        if not scheduler_kwargs:\n            return None\n        scheduler_kwargs = deepcopy(scheduler_kwargs)\n        scheduler = scheduler_kwargs['scheduler']\n        del scheduler_kwargs['scheduler']\n        return scheduler(self.optimizer,**scheduler_kwargs)\n\n    def init_model(self, sys_data=None, nu=-1, ny=-1, device='cpu', auto_fit_norm=True, optimizer_kwargs={}, parameters_optimizer_kwargs={}, scheduler_kwargs={}):\n        '''This function sets nu and ny, initializes the networks, moves the parameters to the device, and initializes the optimizer and the logging parameters'''\n        if sys_data==None:\n            assert nu!=-1 and ny!=-1, 'either sys_data or (nu and ny) should be provided'\n            self.nu, self.ny = nu, ny\n        else:\n            self.nu, self.ny = sys_data.nu, sys_data.ny\n            if auto_fit_norm:\n                if not self.norm.is_id:\n                    warnings.warn('Fitting the norm due to auto_fit_norm=True')\n                self.norm.fit(sys_data)\n        self.init_nets(self.nu, self.ny)\n        self.to_device(device=device)\n        parameters_and_optim = [{**item,**parameters_optimizer_kwargs.get(name,{})} for name,item in self.parameters_with_names.items()]\n        self.optimizer = self.init_optimizer(parameters_and_optim, **optimizer_kwargs)\n        self.scheduler = self.init_scheduler(**scheduler_kwargs)\n        self.bestfit = float('inf')\n        self.Loss_val, self.Loss_train, self.batch_id, self.time, self.epoch_id = np.array([]), np.array([]), np.array([]), np.array([]), np.array([])\n        self.init_model_done = True\n\n    @property\n    def parameters(self):\n        return [item for key,item in self.parameters_with_names.items()]\n    @property\n    def parameters_with_names(self):\n        if hasattr(self,'excluded_nets_from_parameters'):\n            excluded_nets = self.excluded_nets_from_parameters\n        else:\n            excluded_nets = []\n        nns = {d:{'params':self.__getattribute__(d).parameters()} for d in dir(self) if \\\n            d not in ['parameters_with_names','parameters']+excluded_nets and isinstance(self.__getattribute__(d),nn.Module)}\n        pars = {d:{'params':self.__getattribute__(d)} for d in dir(self) if \\\n            
d not in ['parameters_with_names','parameters']+excluded_nets and isinstance(self.__getattribute__(d),nn.Parameter)}\n        return {**nns,**pars}\n\n\n    def cal_validation_error(self, val_sys_data, validation_measure='sim-NRMS'):\n        '''possible validation_measure values are\n        'sim-NRMS'\n        'sim-NRMS_mean_channel'\n        'sim-NRMS_per_channels' (and others defined in System_data)\n        'sim-NRMS_sys_norm'\n        \n        '10-step-NRMS' or '10-step-average-NRMS'\n        '10-step-last-NRMS'\n        '10-step-last-RMS'\n        '10-step-[w0,w1,w2,w3,w4,w5,w6,w7,w8,w9]-NRMS' weighted mean 10-step-error\n        '10-step-NMAE_sys_norm'\n        '10-step-MSE'\n        'X-step-{last/average}-{mode}' #like this\n        \n        #todo:\n        User given callback. (overwrite this function?)\n        'loss' #todo\n        'sim-inno' #todo\n        '''\n        if validation_measure.find('sim')==0:\n            val_sys_data_sim = self.apply_experiment(val_sys_data)\n            sim_val_fun = validation_measure.split('-')[1]\n            if sim_val_fun=='NRMS_sys_norm':\n                return self.norm.transform(val_sys_data_sim).RMS(self.norm.transform(val_sys_data))\n            else:\n                return val_sys_data_sim.__getattribute__(sim_val_fun)(val_sys_data)\n        elif validation_measure.find('step')!=-1:\n            splitted = validation_measure.split('-')\n            nstep = int(splitted[0])\n            mode = splitted[-1]\n            n_step_error = self.n_step_error(val_sys_data, nf=nstep, stride=1, mode=mode, mean_channels=True)\n\n            average_method = 'average' if len(splitted)==3 else splitted[2]\n            \n            if average_method[0]=='[':\n                w = np.array([float(a) for a in average_method[1:-1].split(',')])\n                return np.sum(w*n_step_error)/np.sum(w)\n            elif average_method=='average':\n                return np.mean(n_step_error)\n            elif average_method=='last':\n                return n_step_error[-1]\n        raise NotImplementedError(f'validation_measure={validation_measure} not implemented, use one such as \"sim-NRMS\", \"sim-NRMS_mean_channels\", \"10-step-average-NRMS\", etc.')\n\n    def fit(self, train_sys_data, val_sys_data, epochs=30, batch_size=256, loss_kwargs={}, \\\n            auto_fit_norm=True, validation_measure='sim-NRMS', optimizer_kwargs={}, concurrent_val=False, cuda=False, \\\n            timeout=None, verbose=1, sqrt_train=True, num_workers_data_loader=0, print_full_time_profile=False, scheduler_kwargs={}):\n        '''The batch optimization method with parallel validation.\n\n        Parameters\n        ----------\n        train_sys_data : System_data or System_data_list\n            The system data to be fitted\n        val_sys_data : System_data or System_data_list\n            The validation system data, used after each epoch for early stopping. Use the keyword argument validation_measure to specify which measure should be used. \n        epochs : int\n        batch_size : int\n        loss_kwargs : dict\n            The Keyword Arguments to be passed to the self.make_training_data and self.loss of the current fit_system.\n        auto_fit_norm : bool\n            If true, self.norm.fit(train_sys_data) will be used, which fits it element-wise. \n        validation_measure : str\n            Specify which measure should be used for validation, e.g. 'sim-RMS', '10-step-last-RMS', 'sim-NRMS_sys_norm', etc. See self.cal_validation_error for details.\n        optimizer_kwargs : dict\n            The Keyword Arguments to be passed on to init_optimizer. Note: optimizer_kwargs['optimizer'] is the optimization function used (default torch.optim.Adam)\n            and optimizer_kwargs['parameters_optimizer_kwargs'] sets the learning rates and similar options for the different elements of the model; 
see https://pytorch.org/docs/stable/optim.html\n        concurrent_val : bool\n            If set to true a subprocess will be started which concurrently evaluates the validation method selected.\n            Warning: if concurrent_val is set, wrap your script in \"if __name__=='__main__':\" or import self-defined methods or networks from a file.\n        cuda : bool\n            whether cuda will be used (often slower than not using it, be aware)\n        timeout : None or number\n            Alternative to epochs to run until a set amount of time has passed. \n        verbose : int\n            Set to 0 for a silent run\n        sqrt_train : bool\n            will print the square root of the training loss\n        num_workers_data_loader : int\n            see https://pytorch.org/docs/stable/data.html\n        print_full_time_profile : bool\n            will print the full time profile, useful for debugging and basic process optimization. \n        scheduler_kwargs : dict\n            learning rate schedules are a work in progress.\n        \n        Notes\n        -----\n        This method implements a batch optimization method in the following way: each epoch the training data is scrambled and batched; each batch\n        is passed to the self.loss method and utilized to optimize the parameters. After each epoch the system is validated using the evaluation of a \n        simulation or a validation split, and a checkpoint will be created if a new lowest validation loss has been achieved (or concurrently if concurrent_val=True).\n        After training (which can be stopped at any moment using a KeyboardInterrupt) the system is loaded with the lowest validation loss. \n\n        The default checkpoint location is \"C:/Users/USER/AppData/Local/deepSI/checkpoints\" for Windows and ~/.deepSI/checkpoints/ for Unix-like systems.\n        These can be loaded manually using sys.checkpoint_load_system(\"_best\") or \"_last\". (For this to work the sys.unique_code needs to be set to the correct string)\n        '''\n        def validation(train_loss=None, time_elapsed_total=None):\n            self.eval(); self.cpu()\n            Loss_val = self.cal_validation_error(val_sys_data, validation_measure=validation_measure)\n            self.Loss_val.append(Loss_val)\n            self.Loss_train.append(train_loss)\n            self.time.append(time_elapsed_total)\n            self.batch_id.append(self.batch_counter)\n            self.epoch_id.append(self.epoch_counter)\n            if self.bestfit>=Loss_val:\n                self.bestfit = Loss_val\n                self.checkpoint_save_system()\n            if cuda: \n                self.cuda()\n                self.train()\n            return Loss_val\n        \n        ########## Initialization ##########\n        if self.init_model_done==False:\n            if verbose: print('Initializing the model and optimizer')\n            device = 'cuda' if cuda else 'cpu'\n            optimizer_kwargs = deepcopy(optimizer_kwargs)\n            parameters_optimizer_kwargs = optimizer_kwargs.get('parameters_optimizer_kwargs',{})\n            if parameters_optimizer_kwargs:\n                del optimizer_kwargs['parameters_optimizer_kwargs']\n            self.init_model(sys_data=train_sys_data, device=device, auto_fit_norm=auto_fit_norm, optimizer_kwargs=optimizer_kwargs,\\\n                parameters_optimizer_kwargs=parameters_optimizer_kwargs, scheduler_kwargs=scheduler_kwargs)\n        else:\n            if verbose: print('Model already initialized (init_model_done=True); skipping initialization of the model, the norm and the creation of the optimizer')\n        self._check_and_refresh_optimizer_if_needed() \n\n\n        if self.scheduler==False and verbose:\n            print('!!!! You might be continuing from a save which had a scheduler that was removed during saving... 
check this !!!!!!')\n        \n        self.dt = train_sys_data.dt\n        if cuda: \n            self.cuda()\n            self.train()\n\n        self.epoch_counter = 0 if len(self.epoch_id)==0 else self.epoch_id[-1]\n        self.batch_counter = 0 if len(self.batch_id)==0 else self.batch_id[-1]\n        extra_t            = 0 if len(self.time)    ==0 else self.time[-1] #correct timer after restart\n\n        ########## Getting the data ##########\n        data_train = self.make_training_data(self.norm.transform(train_sys_data), **loss_kwargs)\n        if not isinstance(data_train, Dataset) and verbose: print_array_byte_size(sum([d.nbytes for d in data_train]))\n\n        #### transforming it back to a list to be able to append. ########\n        self.Loss_val, self.Loss_train, self.batch_id, self.time, self.epoch_id = list(self.Loss_val), list(self.Loss_train), list(self.batch_id), list(self.time), list(self.epoch_id)\n\n        #### init monitoring values ########\n        Loss_acc_val, N_batch_acc_val, val_counter, best_epoch, batch_id_start = 0, 0, 0, 0, self.batch_counter #to print the frequency of the validation step.\n        N_training_samples = len(data_train) if isinstance(data_train, Dataset) else len(data_train[0])\n        batch_size = min(batch_size, N_training_samples)\n        N_batch_updates_per_epoch = N_training_samples//batch_size\n        if verbose>0: \n            print(f'N_training_samples = {N_training_samples}, batch_size = {batch_size}, N_batch_updates_per_epoch = {N_batch_updates_per_epoch}')\n        \n        ### convert to dataset ###\n        if isinstance(data_train, Dataset):\n            persistent_workers = False if num_workers_data_loader==0 else True\n            data_train_loader = DataLoader(data_train, batch_size=batch_size, drop_last=True, shuffle=True, \\\n                   num_workers=num_workers_data_loader, persistent_workers=persistent_workers)\n        else: #add my basic DataLoader\n            data_train_loader = My_Simple_DataLoader(data_train, batch_size=batch_size) #is quite a bit faster for low data situations\n\n        if concurrent_val:\n            self.remote_start(val_sys_data, validation_measure)\n            self.remote_send(float('nan'), extra_t)\n        else: #start with the initial validation \n            validation(train_loss=float('nan'), time_elapsed_total=extra_t) #also sets current model to cuda\n            if verbose: \n                print(f'Initial Validation {validation_measure}=', self.Loss_val[-1])\n\n        try:\n            t = Tictoctimer()\n            start_t = time.time() #time keeping\n            epochsrange = range(epochs) if timeout is None else itertools.count(start=0)\n            if timeout is not None and verbose>0: \n                print(f'Starting indefinite training until {timeout} seconds have passed due to provided timeout')\n\n            for epoch in (tqdm(epochsrange) if verbose>0 else epochsrange):\n                bestfit_old = self.bestfit #to check if a new lowest validation loss has been achieved\n                Loss_acc_epoch = 0.\n                t.start()\n                t.tic('data get')\n                for train_batch in data_train_loader:\n                    if cuda:\n                        train_batch = [b.cuda() for b in train_batch]\n                    t.toc('data get')\n                    def closure(backward=True):\n                        t.toc('optimizer start')\n                        t.tic('loss')\n                        Loss = self.loss(*train_batch, **loss_kwargs)\n                        t.toc('loss')\n                        if backward:\n                            t.tic('zero_grad')\n                            self.optimizer.zero_grad()\n                            t.toc('zero_grad')\n                            t.tic('backward')\n                            Loss.backward()\n                            t.toc('backward')\n                        t.tic('stepping')\n                        return Loss\n\n                    t.tic('optimizer start')\n                    training_loss = self.optimizer.step(closure).item()\n                    t.toc('stepping')\n                    if self.scheduler:\n                        t.tic('scheduler')\n                        self.scheduler.step()\n                        t.toc('scheduler')\n                    Loss_acc_val += training_loss\n                    Loss_acc_epoch += training_loss\n                    N_batch_acc_val += 1\n                    self.batch_counter += 1\n                    self.epoch_counter += 1/N_batch_updates_per_epoch\n\n                    t.tic('val')\n                    if concurrent_val and 
self.remote_recv(): ####### validation #######\n self.remote_send(Loss_acc_val/N_batch_acc_val, time.time()-start_t+extra_t)\n Loss_acc_val, N_batch_acc_val, val_counter = 0., 0, val_counter + 1\n t.toc('val')\n t.tic('data get')\n t.toc('data get')\n\n ########## end of epoch clean up ##########\n train_loss_epoch = Loss_acc_epoch/N_batch_updates_per_epoch\n if np.isnan(train_loss_epoch):\n if verbose>0: print(f'&&&&&&&&&&&&& Encountered a NaN value in the training loss at epoch {epoch}, breaking from loop &&&&&&&&&&')\n break\n\n t.tic('val')\n if not concurrent_val:\n validation(train_loss=train_loss_epoch, \\\n time_elapsed_total=time.time()-start_t+extra_t) #updates bestfit and goes back to cpu and back\n t.toc('val')\n t.pause()\n\n ######### Printing Routine ##########\n if verbose>0:\n time_elapsed = time.time() - start_t\n if bestfit_old > self.bestfit:\n print(f'########## New lowest validation loss achieved ########### {validation_measure} = {self.bestfit}')\n best_epoch = epoch+1\n if concurrent_val: #if concurrent val than print validation freq\n val_feq = val_counter/(epoch+1)\n valfeqstr = f', {val_feq:4.3} vals/epoch' if (val_feq>1 or val_feq==0) else f', {1/val_feq:4.3} epochs/val'\n else: #else print validation time use\n valfeqstr = f''\n trainstr = f'sqrt loss {train_loss_epoch**0.5:7.4}' if sqrt_train and train_loss_epoch>=0 else f'loss {train_loss_epoch:7.4}'\n Loss_val_now = self.Loss_val[-1] if len(self.Loss_val)!=0 else float('nan')\n Loss_str = f'Epoch {epoch+1:4}, {trainstr}, Val {validation_measure} {Loss_val_now:6.4}'\n loss_time = (t.acc_times['loss'] + t.acc_times['optimizer start'] + t.acc_times['zero_grad'] + t.acc_times['backward'] + t.acc_times['stepping']) /t.time_elapsed\n time_str = f'Time Loss: {loss_time:.1%}, data: {t.acc_times[\"data get\"]/t.time_elapsed:.1%}, val: {t.acc_times[\"val\"]/t.time_elapsed:.1%}{valfeqstr}'\n self.batch_feq = (self.batch_counter - batch_id_start)/(time.time() - start_t)\n batch_str = (f'{self.batch_feq:4.1f} batches/sec' if (self.batch_feq>1 or self.batch_feq==0) else f'{1/self.batch_feq:4.1f} sec/batch')\n print(f'{Loss_str}, {time_str}, {batch_str}')\n if print_full_time_profile:\n print('Time profile:',t.percent())\n\n ####### Timeout Breaking ##########\n if timeout is not None:\n if time.time() >= start_t+timeout:\n break\n except KeyboardInterrupt:\n print('Stopping early due to a KeyboardInterrupt')\n\n self.train(); self.cpu()\n del data_train_loader\n\n ####### end of training concurrent things #####\n if concurrent_val:\n if verbose: print(f'Waiting for started validation process to finish and one last validation... (receiving = {self.remote.receiving})',end='')\n if self.remote_recv(wait=True):\n if verbose: print('Recv done... 
',end='')\n if N_batch_acc_val>0:\n self.remote_send(Loss_acc_val/N_batch_acc_val, time.time()-start_t+extra_t)\n self.remote_recv(wait=True)\n self.remote_close()\n if verbose: print('Done!')\n\n \n self.Loss_val, self.Loss_train, self.batch_id, self.time, self.epoch_id = np.array(self.Loss_val), np.array(self.Loss_train), np.array(self.batch_id), np.array(self.time), np.array(self.epoch_id)\n self.checkpoint_save_system(name='_last')\n try:\n self.checkpoint_load_system(name='_best')\n except FileNotFoundError:\n print('no best checkpoint found keeping last')\n if verbose: \n print(f'Loaded model with best known validation {validation_measure} of {self.bestfit:6.4} which happened on epoch {best_epoch} (epoch_id={self.epoch_id[-1] if len(self.epoch_id)>0 else 0:.2f})')\n\n ########## Saving and loading ############\n def checkpoint_save_system(self, name='_best', directory=None):\n directory = get_work_dirs()['checkpoints'] if directory is None else directory\n file = os.path.join(directory,self.name + name + '.pth')\n torch.save(self.__dict__, file)\n def checkpoint_load_system(self, name='_best', directory=None):\n directory = get_work_dirs()['checkpoints'] if directory is None else directory\n file = os.path.join(directory,self.name + name + '.pth')\n try:\n self.__dict__ = torch.load(file)\n init_model_done = self.init_model_done if hasattr(self,'init_model_done') else True\n if init_model_done:\n self.Loss_val, self.Loss_train, self.batch_id, self.time, self.epoch_id = np.array(self.Loss_val), np.array(self.Loss_train), np.array(self.batch_id), np.array(self.time), np.array(self.epoch_id)\n for i in np.where(np.isnan(self.Loss_train))[0]:\n if i!=len(self.Loss_train)-1: #if the last is NaN than I will leave it there. Something weird happened like breaking before one validation loop was completed. \n self.Loss_train[i] = self.Loss_train[i+1]\n except FileNotFoundError:\n raise FileNotFoundError(f'No such file at {file}, did you set sys.unique_code correctly?')\n def save_system(self, file):\n '''Save the system using pickle provided by torch\n\n Notes\n -----\n This can be quite unstable for long term storage or switching between versions of this and other modules.\n Consider manually creating a save_system function for a long term solution. 
(maybe utilize checkpoint_save_system)\n        '''\n        torch.save(self, file)\n\n    def _check_and_refresh_optimizer_if_needed(self):\n        if hasattr(self.optimizer, '_cuda_graph_capture_health_check'): \n            try:\n                self.optimizer._cuda_graph_capture_health_check()\n            except AttributeError:\n                print('*** Refreshing optimizer with _refresh_optimizer (probably due to a restart of training after loading the model from a file)')\n                self._refresh_optimizer()\n    def _refresh_optimizer(self):\n        parameters = [item for name,item in self.parameters_with_names.items()]\n        optimizer_new = self.optimizer.__class__(parameters, **self.optimizer.defaults)\n        optimizer_new.load_state_dict(self.optimizer.state_dict())\n        self.optimizer = optimizer_new\n\n    ### CPU & CUDA Transfers ###\n    def cuda(self):\n        self.to_device('cuda')\n    def cpu(self):\n        self.to_device('cpu')\n    def to_device(self,device):\n        for d in dir(self):\n            if d in ['parameters_with_names','parameters']:\n                continue\n            attribute = self.__getattribute__(d)\n            if isinstance(attribute,(nn.Module,nn.Parameter)):\n                attribute.to(device)\n            elif isinstance(attribute, torch.optim.Optimizer):\n                for key,item in attribute.state.items():\n                    for name,item2 in item.items():\n                        if isinstance(item2, torch.Tensor):\n                            item[name] = item2.to(device)\n    def eval(self):\n        for d in dir(self):\n            attribute = self.__getattribute__(d)\n            if isinstance(attribute,nn.Module):\n                attribute.eval()\n    def train(self):\n        for d in dir(self):\n            attribute = self.__getattribute__(d)\n            if isinstance(attribute,nn.Module):\n                attribute.train()\n\n    ########## Remote ##########\n    def remote_start(self, val_sys_data, validation_measure):\n        from multiprocessing import Process, Pipe\n        self.remote, work_remote = Pipe()\n        self.remote.receiving = False\n        process = Process(target=_worker, args=(work_remote, self.remote, val_sys_data, validation_measure))\n        process.daemon = True # if the main process crashes, we should not cause things to hang\n        process.start()\n        work_remote.close()\n        self.remote.process = process\n\n    def remote_send(self, Loss_acc_val, time_optimize):\n        assert self.remote.receiving==False\n        remote = self.remote\n        del self.remote #remote cannot be copied by deepcopy\n        copy_self = deepcopy(self)\n        self.remote = remote\n        copy_self.cpu(); copy_self.eval()\n        import pickle\n        if b'__main__' in pickle.dumps(copy_self.scheduler):\n            print('setting scheduler to False because it references __main__ and cannot be pickled')\n            copy_self.scheduler = False\n        self.remote.send((copy_self, Loss_acc_val, time_optimize)) #time here does not matter\n        self.remote.receiving = True\n\n    def remote_recv(self,wait=False):\n        if self.remote.receiving and (self.remote.poll() or wait):\n            self.Loss_val, self.Loss_train, self.batch_id, self.time, self.epoch_id, self.bestfit = self.remote.recv()\n            self.remote.receiving = False\n            return True\n        else:\n            return False\n\n    def remote_close(self):\n        self.remote.close()\n        self.remote.process.join()\n        del self.remote\n\nimport signal\nimport logging\nclass IgnoreKeyboardInterrupt:\n    def __enter__(self):\n        self.old_handler = signal.signal(signal.SIGINT, self.handler)\n    \n    def handler(self, sig, frame):\n        print('Validation process received SIGINT but was ignored in favour of finishing computations.')\n    \n    def __exit__(self, type, value, traceback): #on exit, restore the original SIGINT handler\n        signal.signal(signal.SIGINT, self.old_handler)\n\ndef _worker(remote, parent_remote, val_sys_data=None, validation_measure='sim-NRMS'):\n    '''Utility function used by .fit for concurrent validation'''\n    \n    parent_remote.close()\n    
while True:\n        try:\n            with IgnoreKeyboardInterrupt():\n                sys, Loss_train, time_now = remote.recv() #gets the current network\n                Loss_val = sys.cal_validation_error(val_sys_data, validation_measure)\n                sys.Loss_val.append(Loss_val)\n                sys.Loss_train.append(Loss_train)\n                sys.batch_id.append(sys.batch_counter)\n                sys.time.append(time_now)\n                sys.epoch_id.append(sys.epoch_counter)\n\n                sys.train() #back to training mode\n                if sys.bestfit >= Loss_val:\n                    sys.bestfit = Loss_val\n                    sys.checkpoint_save_system('_best')\n                remote.send((sys.Loss_val, sys.Loss_train, sys.batch_id, sys.time, sys.epoch_id, sys.bestfit)) #sends back arrays\n        except EOFError: #main process stopped\n            break\n        except Exception as err: #some other error\n            import traceback\n            with open('validation process crash file.txt','w') as f:\n                f.write(traceback.format_exc())\n            raise err\n\n\nclass Tictoctimer(object):\n    def __init__(self):\n        self.time_acc = 0\n        self.timer_running = False\n        self.start_times = dict()\n        self.acc_times = dict()\n    @property\n    def time_elapsed(self):\n        if self.timer_running:\n            return self.time_acc + time.time() - self.start_t\n        else:\n            return self.time_acc\n    \n    def start(self):\n        self.timer_running = True\n        self.start_t = time.time()\n    \n    def pause(self):\n        self.time_acc += time.time() - self.start_t\n        self.timer_running = False\n    \n    def tic(self,name):\n        self.start_times[name] = time.time()\n    \n    def toc(self,name):\n        if self.acc_times.get(name) is None:\n            self.acc_times[name] = time.time() - self.start_times[name]\n        else:\n            self.acc_times[name] += time.time() - self.start_times[name]\n\n    def percent(self):\n        elapsed = self.time_elapsed\n        R = sum([item for key,item in self.acc_times.items()])\n        return ', '.join([key + f' {item/elapsed:.1%}' for key,item in self.acc_times.items()]) +\\\n            f', others {1-R/elapsed:.1%}'\n    \nclass My_Simple_DataLoader:\n    def __init__(self, data, batch_size=32):\n        self.data = [torch.as_tensor(d,dtype=torch.float32) for d in data] #this copies the data again\n        self.ids = np.arange(len(data[0]),dtype=int)\n        self.batch_size = batch_size\n    \n    def __iter__(self):\n        np.random.shuffle(self.ids)\n        return My_Simple_DataLoaderIterator(self.data, self.ids, self.batch_size)\n    \nclass My_Simple_DataLoaderIterator:\n    def __init__(self, data, ids, batch_size):\n        self.ids = ids #already shuffled\n        self.data = data\n        self.L = len(data[0])\n        self.i = 0\n        self.batch_size = batch_size\n    def __iter__(self):\n        return self\n    def __next__(self):\n        self.i += self.batch_size\n        if self.i>self.L:\n            raise StopIteration\n        ids_now = self.ids[self.i-self.batch_size:self.i]\n        return [d[ids_now] for d in self.data]\n\ndef print_array_byte_size(Dsize):\n    if Dsize>2**30: \n        dstr = f'{Dsize/2**30:.1f} GB!'\n        dstr += '\\nConsider using online_construct=True (in loss_kwargs) or let make_training_data return a Dataset to reduce data-usage'\n    elif Dsize>2**20: \n        dstr = f'{Dsize/2**20:.1f} MB'\n    else:\n        dstr = f'{Dsize/2**10:.1f} kB'\n    print('Size of the training array = ', dstr)\n\nif __name__ == '__main__':\n    # sys = deepSI.fit_systems.SS_encoder(nx=3,na=5,nb=5)\n    sys = deepSI.fit_systems.Torch_io_siso(10,10)\n    train, test = deepSI.datasets.CED()\n    print(train,test)\n    # exit()\n    # sys.fit(train,loss_val=test,epochs=500,batch_size=126,concurrent_val=True)\n    sys.fit(train, test, loss_kwargs=dict(online_construct=False), epochs=500, batch_size=126,\\\n        concurrent_val=True, num_workers_data_loader=0, validation_measure='sim-NRMS') # val data is the second positional argument\n    # sys.fit(train,sim_val=test,epochs=10,batch_size=64,concurrent_val=False)\n    # 
sys.fit(train,sim_val=test,epochs=10,batch_size=64,concurrent_val=True)\n    print(sys.Loss_train)\n","repo_name":"GerbenBeintema/deepSI","sub_path":"deepSI/fit_systems/fit_system.py","file_name":"fit_system.py","file_ext":"py","file_size_in_byte":34303,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"81"} +{"seq_id":"43341295629","text":"from socket import gethostname\nfrom django.views.generic.base import TemplateView\nfrom .models import OrdersModel\nfrom TBDjangoDemo.settings import ENABLE_POSTGRES, ENABLE_MYSQL\n\nclass IndexView(TemplateView):\n    template_name = \"index.html\"\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n\n        orders_list = OrdersModel.objects.all()\n        context['orders_list'] = orders_list\n        context['enable_postgres'] = ENABLE_POSTGRES\n        context['enable_mysql'] = ENABLE_MYSQL\n        context['hostname'] = gethostname()\n\n        return context\n\n","repo_name":"sivameetsu/demo-project","sub_path":"DemoApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38498187348","text":"# coding: utf8\nimport hashlib\nimport logging\nimport os\n\nfrom cos_python import qcloud_cos\n\nbase_name = u'/tmp/test'  # local path to upload from\napp_id = 8527552  # replace with your appid\nsecret_id = u'lo3215guhilh'  # replace with your secret_id\nsecret_key = u'vhoipiljghjiphlkj;'  # replace with your secret_key\nregion_info = \"tj\"  # replace with your region, e.g. sh = East China, gz = South China, tj = North China\ncos_client = qcloud_cos.CosClient(app_id, secret_id, secret_key, region=region_info)\ncos_bucket_name = u'ttttt'\n\n# logging configuration ('a' appends; logging does not accept 'rw+')\nlogging.basicConfig(level=logging.ERROR,\n                    filename='/var/log/cos.log',\n                    filemode='a',\n                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\n\ndef create_dir(bucket, path_dir):\n    dir_res = qcloud_cos.CreateFolderRequest(bucket, path_dir)\n    create_set = cos_client.create_folder(dir_res)\n    return create_set.get('code')\n\n\ndef cos_upload_poker(bucket, dst, source, insert_num=0):\n    request = qcloud_cos.UploadFileRequest(bucket, dst, source, insert_only=insert_num)\n    upload = cos_client.upload_file(request)\n    if upload.get('code') != 0:\n        # logging.error('%s failed to upload' % source)\n        logging.error(upload)\n\n\n# Compute the local file hash, compare it with the remote one, and skip the upload if they match\ndef CalcSha1(filepath):\n    with open(filepath, 'rb') as f:\n        sha1obj = hashlib.sha1()\n        sha1obj.update(f.read())\n        hash = sha1obj.hexdigest()\n        return hash\n\n\n# Get the sha value of the remote file\ndef get_stats(bucket, dst, source):\n    get_file_stat = qcloud_cos.StatFileRequest(bucket, dst)\n    res = cos_client.stat_file(get_file_stat)\n    if res.get('code') == 0:\n        code_stas = res.get('data').get('sha')\n        if code_stas == CalcSha1(source):\n            return True\n        else:\n            return False\n\n\ndef foreach(path):\n    def get_file(y_path):\n        for j in os.listdir(y_path):\n            yield j\n    for i in get_file(path):\n        # dir = u'/'+os.path.relpath(os.path.join(path, i), start=u'/tmp/cos')\n        dir = os.path.join(path, i)[len(base_name):]\n        if os.path.isfile(os.path.join(path, i)):\n            if not get_stats(cos_bucket_name, dir, base_name + dir):\n                cos_upload_poker(cos_bucket_name, dir, base_name + dir)\n\n        else:\n            create_dir(cos_bucket_name, dir)\n            foreach(os.path.join(path, i))\n\nforeach(base_name)\n","repo_name":"x82423990/Dev_scripts","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
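For the deepSI record above, here is a condensed usage sketch distilled from its own __main__ block and docstrings; the dataset loader and model class come from that block, and the validation_measure string follows the grammar documented in cal_validation_error. Treat this as an illustration of the documented API, not a verbatim repo example.

import deepSI

if __name__ == '__main__':  # required guard when concurrent_val=True (see the fit docstring)
    train, test = deepSI.datasets.CED()
    sys = deepSI.fit_systems.SS_encoder(nx=3, na=5, nb=5)
    # fit(train_sys_data, val_sys_data, ...): the validation data is positional
    sys.fit(train, test, epochs=100, batch_size=128,
            validation_measure='10-step-last-NRMS', concurrent_val=True)
    test_sim = sys.apply_experiment(test)  # simulate on the validation set
    print(test_sim.NRMS(test))             # same NRMS used by the 'sim-NRMS' measure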
+{"seq_id":"70016408584","text":"#!/usr/bin/env pypy3\nimport glob\nfrom util import path, score2str\ntry:\n import json\n j = json.loads(open('max.json', 'r').read())\nexcept:\n j = {}\n\nS = 0\nfor name in sorted(j.keys()):\n v = j[name]['score']\n f = j[name]['folder']\n pys = glob.glob('{}/*.py'.format(f))\n sol_name = ''\n if pys:\n sol_name = ' '.join(path(pyf).name for pyf in pys)\n print('{:25}: {:20} {:20} {}'.format(name, score2str(v), sol_name, f))\n S += v\nprint('{:25}: {:20}'.format('Total', score2str(S)))\n","repo_name":"exoji2e/Hashcode-demo-uccps-2021","sub_path":"sum_score.py","file_name":"sum_score.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73862724746","text":"import unittest\n\nimport pytest\n\nfrom infrastructure.CassandraDB import get_cassandra_db\nfrom infrastructure.db import Base\nfrom models import link, user\nfrom services.link import exceptions, schema, service\n\n\nclass TestLinkService(unittest.TestCase):\n @pytest.fixture(autouse=True)\n def db_setup(self, db_session_factory):\n self.db = db_session_factory()\n self.engine = self.db.get_bind()\n\n def setUp(self) -> None:\n Base.metadata.create_all(self.engine)\n # Get cassandra session\n self.cassandra_session = get_cassandra_db()\n # Connect to database and initialize service\n self.service = service.Service(self.db, self.cassandra_session)\n # Insert test data\n # Password:\"hello\"\n self.user1 = user.User(\n username=\"test1\",\n hashed_password=\"$2a$12$6UVnRscNBd8bay6evGch8uvJe6fvCPXYD7S43LP4j6IGmhmKaQ3tm\", # noqa:E501\n email=\"test1@test.com\",\n )\n self.user2 = user.User(\n username=\"test2\",\n hashed_password=\"$2a$12$6UVnRscNBd8bay6evGch8uvJe6fvCPXYD7S43LP4j6IGmhmKaQ3tm\", # noqa:E501\n email=\"test2@test.com\",\n )\n self.link1 = link.Link(\n key=\"testlink123\",\n reference=\"http://test.com\",\n owner_id=1,\n action=\"REDIRECT\",\n is_active=True,\n )\n stmt = self.cassandra_session.prepare(\n \"INSERT INTO urls (key, reference, action, owner_id,is_active) VALUES (?,?,?,?,?);\" # noqa:E501\n )\n self.cassandra_session.execute(\n stmt,\n (\n self.link1.key,\n self.link1.reference,\n self.link1.action,\n self.link1.owner_id,\n self.link1.is_active,\n ),\n )\n self.db.add(self.user1)\n self.db.add(self.user2)\n self.db.add(self.link1)\n self.db.commit()\n\n def test_create_link(self):\n \"\"\"Test if user can create a link\"\"\"\n inp = schema.CreateLinkSchema(\n reference=\"http://test.com\",\n is_active=True,\n )\n res = self.service.create_link(inp, 1)\n db_link = (\n self.db.query(link.Link)\n .filter(link.Link.key == res.key)\n .first() # noqa:E501\n )\n self.assertIsNotNone(db_link)\n self.assertEqual(db_link.action, \"REDIRECT\")\n\n def tearDown(self) -> None:\n self.db.rollback()\n self.db.close()\n Base.metadata.drop_all(self.engine)\n # Clear url table\n self.cassandra_session.execute(\"TRUNCATE urls\")\n\n def test_get_link_by_key(self):\n \"\"\"Test if a link can be retrieved by key\"\"\"\n res = self.service.get_link_by_key(\"testlink123\")\n self.assertIsNotNone(res)\n self.assertEqual(res.reference, \"http://test.com\")\n self.assertRaises(\n exceptions.LinkNotFoundException,\n self.service.get_link_by_key,\n \"testlink345\", # noqa:E501\n )\n\n def test_get_all_links(self):\n \"\"\"Test if all links can be retrieved\"\"\"\n res = self.service.get_all_links()\n self.assertIsNotNone(res)\n self.assertEqual(len(res), 1)\n self.assertEqual(res[0].reference, 
\"http://test.com\")\n\n def test_update_link(self):\n \"\"\"Test if link can be updated\"\"\"\n inp = schema.UpdateLinkSchema(\n key=\"testlink123\",\n action=\"BLOCK\",\n )\n res = self.service.update_link_action_by_key(inp)\n self.assertIsNotNone(res)\n self.assertEqual(res.action, \"BLOCK\")\n inp = schema.UpdateLinkSchema(\n key=\"testlink\",\n action=\"BLOCK\",\n )\n self.assertRaises(\n exceptions.LinkNotFoundException,\n self.service.update_link_action_by_key,\n inp, # noqa:E501\n )\n\n # TODO: Remove commented code\n '''\n\n def test_get_user_by_email(self):\n \"\"\"Test if a user can be retreived by email\"\"\"\n db_user = self.service.get_user_by_email(self.user1.email)\n self.assertIsNotNone(db_user)\n self.assertEqual(db_user.username, self.user1.username)\n self.assertRaises(\n exceptions.UserNotFoundException,\n self.service.get_user_by_email,\n \"test5@test.com\",\n )\n\n def test_authenticate(self):\n \"\"\"Test if a user can be authenticated\"\"\"\n creds = schema.AuthenticateSchema(\n username=self.user1.username, password=\"hello\"\n )\n db_user = self.service.authenticate(creds)\n self.assertIsNotNone(db_user)\n self.assertEqual(db_user.username, self.user1.username)\n invalid_creds = schema.AuthenticateSchema(\n username=self.user1.username, password=\"hello2\"\n )\n self.assertRaises(\n exceptions.InvalidCredentials,\n self.service.authenticate,\n invalid_creds, # noqa:E501\n )\n'''\n","repo_name":"Krishap-s/url-shortener","sub_path":"services/link/test_service.py","file_name":"test_service.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20434633001","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 30 14:29:42 2017\n\n@author: erwan\n\nSummary\n-------\n\nReproducing the validation case of Klarenaar 2017 [1]_, who calculated a transmittance\nspectrum from the initial data of Dang 1982 [2]_, with a 1 rotational temperature +\n3 vibrational temperature (Treanor distributions) model\n\nCO2 Energies are calculated from Dunham developments in an uncoupled harmonic\noscillator - rigid rotor model\n\nReferences\n----------\n\n.. [1] Klarenaar et al 2017, \"Time evolution of vibrational temperatures in a CO2 glow\n discharge measured with infrared absorption spectroscopy\" doi/10.1088/1361-6595/aa902e\n\n.. 
[2] Dang et al 1982, \"Detailed vibrational population distributions in a CO2 laser\n discharge as measured with a tunable diode laser\" doi/10.1007/BF00694640\n\n\n-------------------------------------------------------------------------------\n\n\n\"\"\"\n\nfrom os.path import join\n\nfrom radis import SpectrumFactory\nfrom radis.misc.printer import printm\nfrom radis.spectrum import Spectrum, get_residual, plot_diff\nfrom radis.test.utils import getValidationCase, setup_test_line_databases\n\n\ndef test_klarenaar_validation_case(\n verbose=True, plot=False, warnings=True, *args, **kwargs\n):\n \"\"\"Reproduce the Klarenaar 2018 validation case, as given in the\n [RADIS-2018]_ article.\n\n References\n ----------\n\n Klarenaar et al, \"Time evolution of vibrational temperatures in a CO 2 glow\n discharge measured with infrared absorption spectroscopy\", doi 10.1088/1361-6595/aa902e,\n and the references there in.\n\n \"\"\"\n\n setup_test_line_databases()\n\n # %% Data from Dang, adapted by Klarenaar\n s_exp = Spectrum.from_txt(\n getValidationCase(\n join(\n \"test_CO2_3Tvib_vs_klarenaar_data\", \"klarenaar_2017_digitized_data.csv\"\n )\n ),\n \"transmittance_noslit\",\n wunit=\"cm-1\",\n unit=\"\",\n delimiter=\",\",\n name=\"Klarenaar 2017\",\n )\n\n # %% Calculate Klarenaar test case conditions\n\n sf = SpectrumFactory(\n 2284.2,\n 2284.6,\n wstep=0.001, # cm-1\n pressure=20 * 1e-3, # bar\n cutoff=1e-25,\n molecule=\"CO2\",\n isotope=\"1,2\",\n path_length=10, # cm-1\n # warning! 10% in mass fraction -> less in mole fraction\n mole_fraction=0.1 * 28.97 / 44.07,\n truncation=0.5, # cm-1\n medium=\"vacuum\",\n export_populations=\"vib\",\n )\n sf.warnings[\"MissingSelfBroadeningWarning\"] = \"ignore\"\n # sf.load_databank('HITEMP-CO2-DUNHAM')\n sf.load_databank(\"HITEMP-CO2-TEST\")\n\n # Calculate with Klarenaar fitted values\n T12 = 517\n T3 = 2641\n Trot = 491\n\n s = sf.non_eq_spectrum(\n (T12, T12, T3), Trot, Ttrans=Trot, vib_distribution=\"treanor\", name=\"RADIS\"\n )\n\n if plot:\n plot_diff(s, s_exp, \"transmittance_noslit\")\n # plt.savefig('test_CO2_3Tvib_vs_klarenaar.png')\n\n assert get_residual(s, s_exp, \"transmittance_noslit\", ignore_nan=True) < 0.003\n\n return True\n\n\nif __name__ == \"__main__\":\n printm(\n \"test_CO2_3Tvib_vs_klarenaar:\",\n test_klarenaar_validation_case(verbose=True, plot=True),\n )\n","repo_name":"radis/radis","sub_path":"radis/test/validation/test_CO2_3Tvib_vs_klarenaar.py","file_name":"test_CO2_3Tvib_vs_klarenaar.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"81"} +{"seq_id":"72807836745","text":"def recursive(new_cards: list, visited: list, i: int,cnt: int) -> list:\n result = []\n if visited[i]:\n return [cnt]\n visited[i] = True\n get_cnt = recursive(new_cards,visited,new_cards[i], cnt + 1)\n result += get_cnt\n for i in range(len(visited)):\n if not visited[i]:\n new_visited = visited\n result += recursive(new_cards,new_visited,i,0)\n return result\n\n\ndef solution(cards):\n # 계산 편리하게 의미 없는 0 값 넣기\n cards.insert(0,0)\n\n new_cards = cards\n visited = [False] * (len(cards))\n visited[0] = True\n get_result = recursive(new_cards, visited, 1, 0)\n if len(get_result) <= 1:\n return 0\n get_result.sort(reverse=True)\n return get_result[0]*get_result[1]\n\ndef main():\n print(solution([8,6,3,7,2,5,1,4]))\n\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"fineman999/Algorithm","sub_path":"Programmers/Level2/PracticeQuestion/a_master_of_playing_alone.py","file_name":"a_master_of_playing_alone.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38548380062","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 4 23:05:57 2020\r\n\r\n@author: ninjaac\r\n\"\"\"\r\n\r\n\r\n#trapping the water\r\n\r\nclass Solution:\r\n @staticmethod\r\n def water(h):\r\n i,j,l=0,1,len(h)-1\r\n result=[]\r\n while i!=l:\r\n print('i',i)\r\n if h[i]<h[j]:\r\n i+=1;j+=1\r\n print('ij',i,j)\r\n while h[i]>h[j] and j!=l:\r\n print('j',j)\r\n j+=1\r\n if j<i and j==l:\r\n i+=1\r\n j=i+1\r\n print('increase ij',i,j)\r\n print('resukt ij',i,j) \r\n result.append((min(h[i],h[j])*((j-i)-1))-sum(h[i+1:j]))\r\n print(result)\r\n i=j\r\n print('last i',i)\r\n j=i+1\r\n return sum(result)\r\nprint(Solution().water(h=[1,0,2,0,1,3]))","repo_name":"pavi-ninjaac/leetcode","sub_path":"Array/Medium/trapping_water.py","file_name":"trapping_water.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31777168468","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sites', '0001_initial'),\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Bancodwfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('descargar', models.FileField(null=True, upload_to='static/descargas/', blank=True)),\n ('extencion', models.CharField(default='jpg', max_length=20)),\n ('tipo', models.CharField(default='img', max_length=10)),\n ],\n ),\n migrations.CreateModel(\n name='Bancoimg',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('cats', models.TextField(null=True, blank=True)),\n ('texto', models.TextField(null=True, blank=True)),\n ('titulo', models.CharField(default='', max_length=500, null=True, blank=True)),\n ('original', models.ImageField(null=True, upload_to='static/banco/', blank=True)),\n ('webimage', models.ImageField(null=True, upload_to='static/banco/', blank=True)),\n ('recorte', models.ImageField(null=True, upload_to='static/banco/', blank=True)),\n ('isvideo', models.BooleanField(default=False)),\n ('orden', models.IntegerField(default=0)),\n ('publicado', models.BooleanField(default=False)),\n ('recortar', models.BooleanField(default=False)),\n ('linkvideo', models.CharField(default='', max_length=500, null=True, blank=True)),\n ],\n options={\n 'ordering': ['cats'],\n },\n ),\n migrations.CreateModel(\n name='Cat',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('catname', models.CharField(max_length=300)),\n ('catslug', models.SlugField(max_length=500)),\n ('parentcat', models.ForeignKey(blank=True, to='doctor.Cat', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Downloade',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('dtitle', models.CharField(max_length=200, verbose_name='T\\xedtulo')),\n ('dfile', models.FileField(upload_to='static/downloadables')),\n ('boxpk', models.IntegerField()),\n 
('sending', models.BooleanField(default=False)),\n ('type_link', models.CharField(default='link', max_length=20, null=True, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Downloadmod',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('dwfile', models.FileField(upload_to='static/downlobles/')),\n ],\n ),\n migrations.CreateModel(\n name='LinkDwn',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('dtitle', models.CharField(max_length=200, verbose_name='T\\xedtulo descarga')),\n ('dfile', models.CharField(max_length=200, verbose_name='Archvo descarga')),\n ('modulo', models.CharField(max_length=200)),\n ('boxpk', models.IntegerField()),\n ('sending', models.BooleanField(default=False)),\n ('type_link', models.CharField(default='link', max_length=20, null=True, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Linkmod',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('imagen', models.ImageField(null=True, upload_to='static/links/', blank=True)),\n ('link', models.CharField(max_length=255, null=True, blank=True)),\n ('title', models.CharField(max_length=255, null=True, blank=True)),\n ('publicado', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='Logo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('down_title', models.CharField(max_length=200, verbose_name='T\\xedtulo descarga')),\n ('down_file', models.CharField(max_length=200, verbose_name='Archvo descarga')),\n ],\n ),\n migrations.CreateModel(\n name='Mediasection',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200, null=True, blank=True)),\n ('associate_file', models.CharField(max_length=500, null=True, blank=True)),\n ('texto', models.TextField(null=True, blank=True)),\n ('webimage', models.ImageField(null=True, upload_to='static/media/', blank=True)),\n ('orden', models.IntegerField(default=0)),\n ('configs', models.TextField(null=True, blank=True)),\n ('publicado', models.BooleanField(default=False)),\n ],\n options={\n 'ordering': ['orden'],\n },\n ),\n migrations.CreateModel(\n name='Permission',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('permisos', models.TextField(null=True, blank=True)),\n ('grupo', models.ForeignKey(to='auth.Group')),\n ],\n ),\n migrations.CreateModel(\n name='Rowsection',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('module', models.CharField(max_length=100)),\n ('configur', models.TextField(null=True, blank=True)),\n ('orden', models.IntegerField(default=1)),\n ('blankrow', models.CharField(max_length=20, null=True, blank=True)),\n ('name_module', models.CharField(max_length=200, null=True, blank=True)),\n ('publicado', models.BooleanField(default=True)),\n ],\n options={\n 'ordering': ['orden'],\n },\n ),\n migrations.CreateModel(\n name='Seccolmenu',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('col_name', models.CharField(max_length=200, null=True, blank=True)),\n ('slug_col_name', models.CharField(max_length=500, null=True, blank=True)),\n ('orden', 
models.IntegerField(default=1)),\n ],\n options={\n 'ordering': ['orden'],\n },\n ),\n migrations.CreateModel(\n name='Section',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('sec_name', models.CharField(max_length=200, verbose_name='Nombre Secci\\xf3n')),\n ('sec_slug', models.SlugField(max_length=255, verbose_name='slug')),\n ('page_type', models.CharField(blank=True, max_length=50, null=True, verbose_name='Tipo', choices=[('homepage', 'HomePage'), ('bancoimagen', 'banco de Imagenes'), ('intern', 'Interna'), ('links', 'Links')])),\n ('orden', models.IntegerField(default=1)),\n ('webimage', models.CharField(max_length=500, null=True, blank=True)),\n ('mobileimage', models.CharField(max_length=500, null=True, blank=True)),\n ('ishome', models.BooleanField(default=False)),\n ('parent', models.ForeignKey(blank=True, to='doctor.Section', null=True)),\n ('sec_colum_menu', models.ForeignKey(blank=True, to='doctor.Seccolmenu', null=True)),\n ('sitio', models.ForeignKey(blank=True, to='sites.Site', null=True)),\n ],\n options={\n 'ordering': ['orden'],\n },\n ),\n migrations.CreateModel(\n name='Textmodule',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('texto', models.TextField(default='')),\n ('publicado', models.BooleanField(default=False)),\n ('rowpk', models.ForeignKey(to='doctor.Rowsection')),\n ],\n ),\n migrations.CreateModel(\n name='Videofiles',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('videof', models.FileField(upload_to='static/videos')),\n ('converted', models.BooleanField(default=False)),\n ('framimg', models.ImageField(null=True, upload_to='static/videos', blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Videomodule',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100)),\n ('videoid', models.CharField(max_length=500)),\n ('texto', models.TextField(null=True, blank=True)),\n ('orden', models.IntegerField(default=1)),\n ('imagen', models.ImageField(null=True, upload_to='static/videoimg/', blank=True)),\n ('publicado', models.BooleanField(default=False)),\n ('rowpk', models.ForeignKey(to='doctor.Rowsection')),\n ],\n ),\n migrations.AddField(\n model_name='seccolmenu',\n name='parent',\n field=models.ForeignKey(blank=True, to='doctor.Section', null=True),\n ),\n migrations.AddField(\n model_name='rowsection',\n name='sectionpk',\n field=models.ForeignKey(to='doctor.Section'),\n ),\n migrations.AddField(\n model_name='mediasection',\n name='rowpk',\n field=models.ForeignKey(to='doctor.Rowsection'),\n ),\n migrations.AddField(\n model_name='linkmod',\n name='rowpk',\n field=models.ForeignKey(to='doctor.Rowsection'),\n ),\n migrations.AddField(\n model_name='downloadmod',\n name='secpk',\n field=models.ForeignKey(to='doctor.Section'),\n ),\n migrations.AddField(\n model_name='cat',\n name='secparent',\n field=models.ForeignKey(blank=True, to='doctor.Section', null=True),\n ),\n migrations.AddField(\n model_name='bancoimg',\n name='rowpk',\n field=models.ForeignKey(to='doctor.Rowsection'),\n ),\n migrations.AddField(\n model_name='bancodwfile',\n name='bancopk',\n field=models.ForeignKey(to='doctor.Bancoimg'),\n ),\n 
]\n","repo_name":"atomychouse/normatividadsite_2021","sub_path":"doctor/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":11666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43337166477","text":"# -*- coding: utf-8 -*-\n\nfrom cypressPage import CypressPage\nfrom web_driver.daphne.func2.page_func import *\nfrom web_driver.daphne.func2.CTA_setting import *\nfrom web_driver.daphne.func2.create_report import part\nfrom web_driver.daphne.func2.cta_error import CtaTestCaseError\n\n\nclass Daphne(CypressPage):\n\n def __init__(self):\n CypressPage.__init__(self)\n self.daphne_branch_list = []\n self.daphne_branch_list_func()\n self.dicom_four()\n\n def daphne_branch_list_func(self):\n nub = 0\n\n # collect branch elements until no further one is found\n while True:\n branch = self.is_element_exist((daphne_branch_xpath['top_branch_first']\n + str(nub)\n + daphne_branch_xpath['top_branch_second']),\n key_path='xpath')\n if branch:\n self.daphne_branch_list.append(branch)\n nub += 1\n else:\n break\n\n def dicom_four(self):\n for x in range(len(self.daphne_branch_list)):\n self.driver.execute_script((daphne_branch_xpath['top_branch_first']\n + str(x + 1)\n + daphne_branch_xpath['top_branch_second']))\n\n @run_test_func\n def daphne_dicom():\n self.driver.execute_script(daphne_branch_xpath['daphne_dicom'])\n\n @run_test_func\n def daphne_cpr():\n self.driver.execute_script(daphne_branch_xpath['daphne_cpr'])\n\n\n @test_model_except_exception\n def daphne_xsection():\n self.driver.execute_script(daphne_branch_xpath['daphne_xsection'])\n\n @run_test_func\n def daphne_lumen():\n self.driver.execute_script(daphne_branch_xpath['daphne_lumen_xpath'])\n\n\nif __name__ == '__main__':\n daphne = Daphne()\n","repo_name":"Gingo222/JIN","sub_path":"WebUiTestProject/daphne/page/daphnePage.py","file_name":"daphnePage.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7264132068","text":"\nimport asyncio\nimport logging\nfrom asysocks.unicomm.common.scanner.targetgen import UniTargetGen\nfrom asysocks.unicomm.common.scanner.scanner import UniScanner\nfrom aiosmb.commons.connection.factory import SMBConnectionFactory\nfrom pysnaffler.snaffler import pySnaffler\nfrom pysnaffler.ruleset import SnafflerRuleSet\nfrom pysnaffler.scanner import SnafflerScanner\nfrom aiosmb import logger\n\nasync def amain():\n\timport argparse\n\n\tparser = argparse.ArgumentParser(description='Snaffler')\n\tparser.add_argument('-w', '--worker-count', type=int, default=100, help='Parallel count')\n\tparser.add_argument('--dl-per-machine', type=int, default=5, help='Max parallel downloads per machine')\n\tparser.add_argument('--dl-total', type=int, default=20, help='Max parallel downloads in total. Global limit')\n\tparser.add_argument('--maxfile', type=int, default=1024*1024*10, help='Max file size to download')\n\tparser.add_argument('-r', '--rules', help='Path to ruleset directory. If not set, default rules will be used.')\n\tparser.add_argument('-t', '--timeout', type=int, default=36000, help='Timeout for each connection. dangerous!')\n\tparser.add_argument('--no-progress', action='store_false', help='Disable progress bar')\n\tparser.add_argument('-o', '--out-file', help='Output file path.')\n\tparser.add_argument('-e', '--errors', action='store_true', help='Includes errors in output.')\n\tparser.add_argument('-d', '--dry-run', action='store_true', help='Dry run. 
Enumeration only, gives stats on what would be downloaded/checked etc.')\n\tparser.add_argument('-l', '--filelist', action='store_true', help='Generates filelist file containing a list of files and folders enumerated')\n\tparser.add_argument('-k', '--keep-files', action='store_true', help='Keeps downloaded files on disk after parsing')\n\tparser.add_argument('-b', '--base-path', default = 'snaffler_downloads', help='Base directory path for downloaded files')\n\tparser.add_argument('-c', '--config', help='Path to config file. Overrides all other options.')\n\tparser.add_argument('url', help = 'Connection string in URL format')\n\tparser.add_argument('targets', nargs='*', help = 'Hostname or IP address or file with a list of targets')\n\targs = parser.parse_args()\n\n\tif len(args.targets) == 0:\n\t\tprint('No targets defined!')\n\t\treturn\n\t\n\tlogger.setLevel(logging.CRITICAL)\n\t\n\tconnectionfactory = SMBConnectionFactory.from_url(args.url)\n\ttimeout = args.timeout\n\tif args.config is not None:\n\t\tsnaffler = pySnaffler.from_config_file(args.config)\n\telse:\n\t\truleset = SnafflerRuleSet.load_default_ruleset()\n\t\tif args.rules is not None:\n\t\t\truleset = SnafflerRuleSet.from_directory(args.rules)\n\t\t\n\t\tsnaffler = pySnaffler(\n\t\t\truleset, \n\t\t\targs.maxfile, \n\t\t\targs.worker_count, \n\t\t\targs.dl_per_machine, \n\t\t\targs.dl_total,\n\t\t\targs.keep_files,\n\t\t\targs.base_path,\n\t\t\targs.dry_run,\n\t\t\targs.filelist\n\t\t)\n\t\n\t#print('Running config:')\n\t#print(snaffler.to_toml())\n\texecutors = [SnafflerScanner(connectionfactory, snaffler)]\n\ttgen = UniTargetGen.from_list(args.targets)\n\tscanner = UniScanner('Snaffler', executors, [tgen], worker_count=args.worker_count, host_timeout=timeout)\n\tawait scanner.scan_and_process(progress=args.no_progress, out_file=args.out_file, include_errors=args.errors)\n\tsnaffler.print_stats()\n\n\ndef main():\n\tasyncio.run(amain())\n\nif __name__ == '__main__':\n\tmain()","repo_name":"skelsec/pysnaffler","sub_path":"pysnaffler/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"81"} +{"seq_id":"28200727464","text":"# n 입력받아 10미만의 홀수를 nxn크기로 공백으로 구분하여 출력\r\n# n = 3\r\n# 1 3 5\r\n# 7 9 1\r\n# 3 5 7\r\n\r\nn = int(input(\"natural number? 
\"))\r\n\r\nfor i in range (1, n*n+1):\r\n print((2 * i - 1) % 10, end=\" \")\r\n if i % n == 0:\r\n print()\r\n\r\n","repo_name":"etnflash/PYTHONspace","sub_path":"assignment5/Question_9.py","file_name":"Question_9.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22007631929","text":"#Tarea Programada 3\n#Elaborado por: Felipe Obando y Sebastián Bermúdez.\n#Fecha de creación: 01/06/2021\n#Última modificación: 12/06/2021 10:25 pm\n#Versión: 3.9.2\n\n#Función que graba el archivo.\nimport pickle\ndef graba(nombreArchivo,lista):\n \"\"\"\n Función: Grabar/crear un archivo(base de datos).\n Entradas:\n -nombreArchivo(str): Nombre del archivo en el que se va a grabar/crear.\n -lista(list): Lista que se va a guardar en el archivo.\n Salida: N/A.\n \"\"\"\n try:\n f=open(nombreArchivo,\"wb\")\n pickle.dump(lista,f)\n f.close()\n except:\n print(\"Error al grabar el archivo: \", nombreArchivo)\n return \"\"\n#Función que lee un archivo\ndef lee (nomArchLeer):\n \"\"\"\n Función: Grabar/crear un archivo(base de datos).\n Entradas:\n -nombreArchivo(str): Nombre del archivo que se va a cargar en la RAM.\n Salida: \n -lista(list): Lista de donadores.\n \"\"\"\n try:\n f=open(nomArchLeer,\"rb\")\n lista = pickle.load(f)\n f.close()\n return lista\n except:\n print(\"Error al leer el archivo: \", nomArchLeer)\n return False","repo_name":"Huevaldinho/TareaProgramada3","sub_path":"archivos.py","file_name":"archivos.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4660397501","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom utils.wifis import wifi_list, connect\nfrom drones.connection import drones, add_drones, drones_to_connect\nfrom drones.functions import motor_on, takeoff_all\nfrom drones import functions\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.get(\"/list/\")\ndef list_wifis():\n return wifi_list()\n\n\n@app.post('/connect_host/')\ndef connect_host(data: dict):\n ssid = data['ssid']\n add_drones(ssid)\n \n connect(ssid)\n if not drones:\n return {'res': 'error', 'drone_ip': None}\n return {'res': 'success', 'drone_ip': list(drones.keys())[0]}\n # return {'res': 'succeнеss', 'drone_ip': '12341234'}\n\n\n@app.get('/drone_coordinates/')\ndef get_coordinates():\n return functions.get_coordinates()\n\n\n\n@app.get('/get_state/')\ndef get_state():\n return functions.get_drone_state(None)\n\n\n@app.post('/connect_client/')\ndef connect_client(data: dict):\n ssid = data['ssid']\n print(drones_to_connect)\n drones_to_connect.add(ssid)\n print(drones_to_connect)\n return {'res': 'success'}\n\n\n@app.post('/disconnect/')\ndef disconnect_handler(data: dict):\n pioneer = drones[data['drone_ip']]\n pioneer.disconnect()\n\n\nclass DroneIp(BaseModel):\n drone_ip: str\n\n\n@app.post('/motor_on/')\ndef motorTurnOn(drone_ip: DroneIp):\n print(drone_ip)\n functions.motor_on(drones[drone_ip.drone_ip])\n\n@app.post('/motor_off/')\ndef motorTurnOn(drone_ip: DroneIp):\n print(drone_ip)\n functions.motor_off(drones[drone_ip.drone_ip])\n\n@app.post('/takeoff_all/')\ndef disconnect_handler():\n functions.takeoff_all()\n\n@app.post('/land/')\ndef disconnect_handler(data: dict):\n pioneer = 
drones[data['drone_ip']]\n pioneer.land()\n","repo_name":"xrenvtomate/pioneer-gui","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4149851733","text":"import datetime\n\nfrom django.db.models import FieldDoesNotExist, Avg, Max, Min, Count, Sum\nfrom django.utils.translation import ugettext as _\nfrom django.forms import Media\n\nfrom users.models import AdPriceRate\nfrom utils import ConversionUtils\nfrom xadmin.sites import site\nfrom xadmin.views import BaseAdminPlugin, ListAdminView\n\nfrom xadmin.views.list import ResultRow, ResultItem\nfrom xadmin.util import display_for_field\n\nAGGREGATE_METHODS = {\n 'min': Min, 'max': Max, 'avg': Avg, 'sum': Sum, 'count': Count\n}\nAGGREGATE_TITLE = {\n 'min': _('Min'), 'max': _('Max'), 'avg': _('Avg'), 'sum': _('Sum'), 'count': _('Count')\n}\n\n\nclass AggregationPlugin(BaseAdminPlugin):\n\n aggregate_fields = {}\n\n def init_request(self, *args, **kwargs):\n return bool(self.aggregate_fields)\n\n def _get_field_aggregate(self, field_name, obj, row):\n item = ResultItem(field_name, row)\n item.classes = ['aggregate', ]\n if field_name not in self.aggregate_fields:\n item.text = \"\"\n else:\n try:\n f = self.opts.get_field(field_name)\n agg_method = self.aggregate_fields[field_name]\n key = '%s__%s' % (field_name, agg_method)\n if key not in obj:\n item.text = \"\"\n else:\n item.text = display_for_field(obj[key], f)\n item.wraps.append('%%s<span class=\"aggregate_title label label-info\">%s</span>' % AGGREGATE_TITLE[agg_method])\n item.classes.append(agg_method)\n except FieldDoesNotExist:\n item.text = \"\"\n\n return item\n\n def _get_aggregate_row(self):\n queryset = self.admin_view.list_queryset._clone()\n obj = queryset.aggregate(*[AGGREGATE_METHODS[method](field_name) for field_name, method in\n self.aggregate_fields.items() if method in AGGREGATE_METHODS])\n if 'cost__sum' in obj:\n\n if self.user.is_superuser != 1:\n obj['cost__sum'] = 0\n for q in queryset:\n obj['cost__sum'] += q.cost\n\n # print('cost__sum::{0}'.format(obj['cost__sum']))\n if 'avgCpc__avg' in obj:\n\n if self.user.is_superuser != 1:\n if obj['clicks__sum'] != 0:\n obj['avgCpc__avg'] = round(obj['cost__sum'] / obj['clicks__sum'], 3)\n if obj['avgCpc__avg'] == 0:\n obj['avgCpc__avg'] = '0.000'\n else:\n obj['avgCpc__avg'] = '0.000'\n # print('avgCpc__avg::{0}'.format(obj['avgCpc__avg']))\n if 'avgCpm__avg' in obj:\n\n if self.user.is_superuser != 1:\n if obj['impressions__sum'] != 0:\n obj['avgCpm__avg'] = round(obj['cost__sum'] / obj['impressions__sum'] * 1000, 3)\n if obj['avgCpm__avg'] == 0:\n obj['avgCpm__avg'] = '0.000'\n else:\n obj['avgCpm__avg'] = '0.000'\n # print('avgCpm__avg::{0}'.format(obj['avgCpm__avg']))\n if 'ctr__sum' in obj:\n\n if self.user.is_superuser != 1:\n if obj['impressions__sum'] != 0:\n obj['ctr__sum'] = str(round((obj['clicks__sum']/obj['impressions__sum'])*100, 2)) + '%'\n else:\n obj['ctr__sum'] = '0.00'\n # print('ctr__sum::{0}'.format(obj['ctr__sum']))\n\n if 'ecpa1__avg' in obj:\n\n if self.user.is_superuser != 1:\n if obj['G1__sum'] != 0:\n obj['ecpa1__avg'] = round(obj['cost__sum'] / obj['G1__sum'], 2)\n if obj['ecpa1__avg'] == 0:\n obj['ecpa1__avg'] = '0.00'\n else:\n obj['ecpa1__avg'] = '0.00'\n # print('ecpa1__sum::{0}'.format(obj['ecpa1__sum']))\n\n if 'ecpa2__avg' in obj:\n\n if self.user.is_superuser != 1:\n if obj['G2__sum'] != 0:\n obj['ecpa2__avg'] = round(obj['cost__sum'] / 
obj['G2__sum'], 2)\n if obj['ecpa2__avg'] == 0:\n obj['ecpa2__avg'] = '0.00'\n else:\n obj['ecpa2__avg'] = '0.00'\n # print('ecpa2__sum::{0}'.format(obj['ecpa2__sum']))\n\n if 'ecpa3__avg' in obj:\n\n if self.user.is_superuser != 1:\n if obj['G3__sum'] != 0:\n obj['ecpa3__avg'] = round(obj['cost__sum'] / obj['G3__sum'], 2)\n if obj['ecpa3__avg'] == 0:\n obj['ecpa3__avg'] = '0.00'\n else:\n obj['ecpa3__avg'] = '0.00'\n # print('ecpa3__sum::{0}'.format(obj['ecpa3__sum']))\n\n if 'ecpa4__avg' in obj:\n\n if self.user.is_superuser != 1:\n if obj['G4__sum'] != 0:\n obj['ecpa4__avg'] = round(obj['cost__sum'] / obj['G4__sum'], 2)\n if obj['ecpa4__avg'] == 0:\n obj['ecpa4__avg'] = '0.00'\n else:\n obj['ecpa4__avg'] = '0.00'\n # print('ecpa4__sum::{0}'.format(obj['ecpa4__sum']))\n\n row = ResultRow()\n row['is_display_first'] = False\n row.cells = [self._get_field_aggregate(field_name, obj, row) for field_name in self.admin_view.list_display]\n row.css_class = 'info aggregate'\n return row\n\n def results(self, rows):\n if rows:\n rows.append(self._get_aggregate_row())\n return rows\n\n # Media\n def get_media(self, media):\n return media + Media(css={'screen': [self.static('xadmin/css/xadmin.plugin.aggregation.css'), ]})\n\n\nsite.register_plugin(AggregationPlugin, ListAdminView)\n","repo_name":"zhuoxiaojian/xadminTest","sub_path":"extra_apps/xadmin/plugins/aggregation.py","file_name":"aggregation.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11971045478","text":"# This file is part of versuchung.\n# \n# versuchung is free software: you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation, either version 3 of the License, or (at your option) any later\n# version.\n# \n# versuchung is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A\n# PARTICULAR PURPOSE. See the GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License along with\n# versuchung. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport logging\nimport sys\nfrom functools import wraps\n\nclass JavascriptStyleDictAccess(dict):\n def __init__(self, d):\n self.update(d)\n def __getattribute__(self, name):\n try:\n return dict.__getattribute__(self, name)\n except AttributeError:\n pass\n if name in self:\n return self[name]\n name = name.replace(\"_\", \"-\")\n if name in self:\n return self[name]\n raise AttributeError\n\n\n\ndef setup_logging(log_level):\n \"\"\" setup the logging module with the given log_level \"\"\"\n\n l = logging.WARNING # default\n if log_level == 1:\n l = logging.INFO\n elif log_level >= 2:\n l = logging.DEBUG\n\n logging.basicConfig(level=l)\n\nclass Singleton(object):\n _instance = None\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(Singleton, cls).__new__(\n cls, *args, **kwargs)\n return cls._instance\n\n\nclass AdviceManager(Singleton):\n def __init__(self):\n if not \"before\" in dir(self):\n self.before = dict()\n self.around = dict()\n self.after = dict()\n\n def around_wrapper(self, func, last = None):\n def wrapped(args, kwargs):\n if last:\n return func(last, args, kwargs)\n else:\n return func(*args, **kwargs)\n return wrapped\n\n @staticmethod\n def advicable(func):\n \"\"\"Decorator to mark a function as advicable\"\"\"\n if not \"__call__\" in dir(func):\n raise ValueError(\"No function adviced\")\n full_name = \"%s.%s\" % (func.__module__, func.__name__)\n\n self = AdviceManager()\n\n if full_name in self.before:\n raise RuntimeError(\"Function already marked as advicable\")\n self.before[full_name] = []\n self.around[full_name] = []\n self.after[full_name] = []\n\n def wrapped(*args, **kwargs):\n am = AdviceManager()\n for f in am.before[full_name]:\n ret = f(args, kwargs)\n if ret:\n (args, kwargs) = ret\n\n if len(am.around[full_name]) > 0:\n func_ = am.around_wrapper(func, None)\n for f in am.around[full_name]:\n func_ = am.around_wrapper(f, func_)\n\n ret = func_(args, kwargs)\n else:\n ret = func(*args, **kwargs)\n\n for f in am.after[full_name]:\n ret = f(ret)\n\n return ret\n wrapped.__doc__ = func.__doc__\n return wrapped\n\n\nclass Advice:\n def __init__(self, method, enabled = False):\n self.method = method\n am = AdviceManager()\n self.am = am\n if not method in am.before:\n raise RuntimeError(\"Function was not marked @advicable\")\n self.enabled = False\n if enabled:\n self.enable()\n\n def disable(self):\n am = self.am\n am.before[self.method] = [ x for x in am.before[self.method]\n if x != self.before ]\n am.around[self.method] = [ x for x in am.around[self.method]\n if x != self.around ]\n am.after[self.method] = [ x for x in am.after[self.method]\n if x != self.after ]\n self.enabled = False\n\n def enable(self):\n am = self.am\n if self.enabled:\n return\n # Hook only in if the methods are overwritten\n if sys.version_info[0] == 2:\n if self.before.im_func != Advice.before.im_func:\n am.before[self.method].append(self.before)\n if self.around.im_func != Advice.around.im_func:\n am.around[self.method].append(self.around)\n if self.after.im_func != Advice.after.im_func:\n am.after[self.method].append(self.after)\n self.enabled = True\n elif sys.version_info[0] == 3:\n if self.before.__func__ != Advice.before:\n am.before[self.method].append(self.before)\n if self.around.__func__ != Advice.around:\n am.around[self.method].append(self.around)\n if self.after.__func__ != Advice.after:\n am.after[self.method].append(self.after)\n self.enabled = True\n\n def before(self, args, 
kwargs):\n return (args, kwargs)\n def around(self, func, args, kwargs):\n return func(args, kwargs)\n def after(self, ret):\n return ret\n\n","repo_name":"stettberger/versuchung","sub_path":"versuchung/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"22182871982","text":"import math\n\ndef solution(w,h):\n # if w == h :\n # return w * h - h\n GCD = math.gcd(w,h)\n small_w = int(w/GCD)\n small_h = int(h/GCD)\n tilt = small_h/small_w\n # print(\"tilt :\", tilt)\n count = 0\n before = 0\n if small_w % 2 == 0:\n check = True\n small_w = int(small_w/2)\n else:\n check = False\n small_w = int(small_w/2) + 1\n # print(small_w)\n for i in range(small_w + 1):\n if i == 0: continue\n temp = tilt * i\n count += math.ceil(temp) - before\n if not check and i == small_w:\n last = math.ceil(temp) - before\n # print(last)\n before = int(temp)\n if check :\n count = count * 2\n else:\n count = count * 2 - last\n\n count = count * GCD\n # print(count)\n answer = w*h - count\n return answer\n\nprint(solution(8,12))\n# print(solution(12,8))\n# print(solution(2,3))\n# print(solution(6,18))\n# print(solution(18,6))\nprint(solution(5,7))","repo_name":"SeungHune/beakjun","sub_path":"exercise_coding_test_22.py","file_name":"exercise_coding_test_22.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2245288779","text":"def single_level(users, directories):\n print(\"\\nSingle Level File Organization Scheme : \", end='\\n\\n')\n print(\"Root Directory : \")\n for i in range(len(users)):\n for j in range(len(directories[i])):\n print(f\"\\t--> {directories[i][j]} [ User : {users[i]} ]\")\n\n\ndef two_level(users, directories):\n print(\"\\nTwo Level File Organization Scheme : \", end='\\n\\n')\n print(\"Root Directory : \")\n for i in range(len(users)):\n print(f\"\\t--> {users[i]}\")\n for j in range(len(directories[i])):\n print(f\"\\t\\t--> {directories[i][j]}\")\n\n\ndef main():\n # users = ['Thamizh', 'Sai', 'Saravanan', 'Tarun']\n users = input(\"Enter the User Names separated by a space : \").split(' ')\n # directories = [['A', 'B', 'C'], ['D', 'E', 'F'], ['G', 'H', 'I'], ['J', 'K', 'L']]\n directories = []\n for i in range(len(users)):\n directories.append(input(f\"Enter the file names of the user : {users[i]} separated by a space : \").split(' '))\n single_level(users, directories)\n two_level(users, directories)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Thamizhiniyan18/Operating_System","sub_path":"22 - Single Level and Two level File Organization Scheme.py","file_name":"22 - Single Level and Two level File Organization Scheme.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12456932109","text":"# Author: Abraham Almahmoud\r\n# Date: 02/02/2020\r\n# Description: Write a class named NobelData that reads a JSON file containing data on Nobel Prizes and allows the user\r\n# to search that data. 
It just needs to read a local JSON file - it doesn't need to access the internet.\r\n# Specifically, your class should have an init method that reads the file, and it should have a method\r\n# named search_nobel that takes as parameters a year and a category, and returns a sorted list (in normal\r\n# dictionary order) of the surnames for the winner(s) in that category for that year (up to three people\r\n# can share the prize). The year will be a string (e.g. \"1975\"), not a number. The categories are:\r\n# \"chemistry\", \"economics\", \"literature\", \"peace\", \"physics\", and \"medicine\". The JSON file will be named\r\n# nobels.json and will be provided - you do not need to submit it.\r\n\r\nimport json\r\n\r\n\r\nclass NobelData:\r\n \"\"\"read json file containing data on nobel prizes, and search for winners based on year/category\"\"\"\r\n def __init__(self):\r\n \"\"\"open the json file containing the data on Nobel Prize winners\"\"\"\r\n with open(\"nobels.json\", 'r') as infile:\r\n self.nobels = json.load(infile)\r\n\r\n def search_nobel(self, year, category):\r\n \"\"\"go through the json file using the passed values to find the winners for the values\"\"\"\r\n awarded = []\r\n nobel = self.nobels[\"prizes\"]\r\n for prizes in range(len(nobel)):\r\n if nobel[prizes][\"year\"] == year: # find year that match\r\n if nobel[prizes][\"category\"] == category: # find category that match\r\n for winners in range(0, len(nobel[prizes][\"laureates\"])): # store values 1-3\r\n awarded.append(nobel[prizes][\"laureates\"][winners][\"surname\"])\r\n awarded.sort() # sort name based on dictionary order\r\n return awarded\r\n","repo_name":"almahmoa/Resume","sub_path":"Python/NobelData.py","file_name":"NobelData.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1585047015","text":"a=int(input(\"enter the number\"))\r\nrev=0\r\ntemp=a\r\nwhile(a!=0):\r\n rem=a%10\r\n rev=rev*10+rem\r\n a=a//10\r\nprint(rev)\r\nif (rev==temp):\r\n print(\"palindrome\")\r\nelse:\r\n print(\"not a palindrome\")\r\n\r\n \r\n","repo_name":"manavmehta2403/GoogleDrive-Cluster","sub_path":"Dcoder/paldrome.py","file_name":"paldrome.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35704049378","text":"import threading\nimport winsound\nimport cv2\nimport imutils\n\n# initialization for camera\n# parameter 0~n is the number of cameras\ncam = cv2.VideoCapture(0, cv2.CAP_DSHOW)\nprint(cam)\n# set the frame rate\ncam.set(cv2.CAP_PROP_FRAME_WIDTH, 648)\ncam.set(cv2.CAP_PROP_FRAME_HEIGHT, 648)\n\n_, start_frame = cam.read()\nstart_frame = imutils.resize(start_frame, width=500)\nstart_frame = cv2.cvtColor(start_frame, cv2.COLOR_BGR2GRAY)\nstart_frame = cv2.GaussianBlur(start_frame, (21,21), 0)\n\nalarm = False\nalarm_mode = False\nalarm_counter = 0\n\n# function to call when alarm happened\ndef beep_alarm():\n global alarm\n for _ in range(5):\n if not alarm_mode:\n break\n print(\"ALARM\")\n winsound.Beep(2500,1000)\n alarm = False\n\nwhile True:\n _, frame = cam.read()\n frame = imutils.resize(frame, width=500)\n\n # Movement tracking by comparing frames\n if alarm_mode:\n frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_bw = cv2.GaussianBlur(frame_bw,(5,5), 0)\n\n difference = cv2.absdiff(frame_bw, start_frame)\n threshold = cv2.threshold(difference,25,255,cv2.THRESH_BINARY)[1]\n start_frame = frame_bw\n\n # can 
arrange number to detect movement rates\n if threshold.sum() > 300:\n alarm_counter += 1\n else:\n if alarm_counter > 0:\n alarm_counter -= 1\n cv2.imshow(\"Cam\", threshold)\n else: \n cv2.imshow(\"Cam\", frame)\n\n if alarm_counter > 20:\n if not alarm:\n alarm = True\n threading.Thread(target = beep_alarm).start()\n\n # initialize key_pressed\n key_pressed = cv2.waitKey(38)\n\n if key_pressed == ord(\"t\"):\n alarm_mode = not alarm_mode\n alarm_counter = 0\n # setting key_pressed\n if key_pressed == ord(\"q\"):\n alarm_mode = False\n break\n\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"thandarkhineaye/Research-Pieces","sub_path":"motion_detection_Alarm_System/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23459086075","text":"import random\n\nOKGREEN = '\\033[92m'\nFAIL = '\\033[91m'\nENDC = '\\033[0m'\n\n\n# For this question I am using the merge sort\n# Function divides the array into 2 parts thru the middle\ndef divide(arr):\n if len(arr) > 1:\n temp = (len(arr) // 2) # get middle point // ensures we get a int\n left = arr[:\n temp] # left array contains all elements from 0 to middle point\n right = arr[\n temp:] # right array contains all elements from middle point + 1 till end\n\n # repeat until each element has an array of its own\n left = divide(left)\n right = divide(right)\n return merge(left, right)\n else:\n return arr\n\n\ndef merge(left, right):\n output = []\n i, j = 0, 0\n\n # loop thru the two left, and right array\n while i < len(left) and j < len(right):\n # if the left side is smaller than the right side, add that\n if left[i] < right[j]:\n # the less-than produces an array in ascending order; for descending order change '<' to '>'\n output.append(left[i])\n i += 1\n else:\n # if the right side is smaller than the left side, add that\n output.append(right[j])\n j += 1\n\n # Add the remaining pieces from left and right\n while i < len(left):\n output.append(left[i])\n i += 1\n while j < len(right):\n output.append(right[j])\n j += 1\n return output\n\n\ndef ground_truth(arr):\n return sorted(arr)\n\n\ndef random_test_gen():\n EPOCHS = 5\n for _ in range(EPOCHS):\n arr_length = random.randint(0, 10)\n input_arr = []\n for _ in range(arr_length):\n input_arr.append(random.randint(1, 20))\n input_data = input_arr\n expected_output = ground_truth(input_arr)\n # run the function we were testing\n output = divide(input_data)\n # check if the output matches the expected output\n if output != expected_output:\n print(\n f\"{FAIL}Test case Fail{ENDC} \\n Input:{input_data}, \\n Expected: {expected_output} \\n Actual: {output}\"\n )\n else:\n print(\n f\"{OKGREEN}Test case Pass{ENDC} \\n Input:{input_data} \\n Expected: {expected_output} \\n Actual: {output}\"\n )\n\n\ndef all_test_gen(test_cases):\n for test in test_cases:\n input_data = test[0]\n expected_output = test[1]\n # run the function we were testing\n output = divide(input_data)\n # check if the output matches the expected output\n if output != expected_output:\n print(\n f\"{FAIL}Test case Fail{ENDC} \\n Input:{input_data}, \\n Expected: {expected_output} \\n Actual: {output}\"\n )\n else:\n print(\n f\"{OKGREEN}Test case Pass{ENDC} \\n Input:{input_data} \\n Expected: {expected_output} \\n Actual: {output}\"\n )\n\n\nrandom_test_gen()\n# 
all_test_gen(test_cases=test_cases)\n","repo_name":"sneh2001patel/COSC3P95-assign1","sub_path":"question_2.py","file_name":"question_2.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7828187238","text":"#-*- coding=utf8 -*-\n\nimport os\n\n'''\nCreated on \n\n@author: albertcheng\n'''\n##---------------------\n## 工具自己的参数区域\n##---------------------\nlinesep = os.linesep\nFTP_Host = \"192.168.1.24\" # ftp ip\nFTP_Port = 21# ftp port\nFTP_User = \"joker\"# ftp user\nFTP_Passwd = \"zhaoqing@2015\"# ftp password\nFTP_Root = \"res_tool_for_xgame/\"#工具的更新路径\n\nTL_Host = \"192.168.1.24\" # telnet ip\nTL_User = \"administrator\" # telnet user\nTL_Pawd = \"zhaoguichun@2015\" # telnet password\nTL_finish_unix = \":~$\"\nTL_finish_win32 = \">\"\n\nVERSION_MANIFEST = \"ver.manifest\"\n\n\nSMB_VERPATH = u\"/6.版本仓库/xgame_pc\"\nSMB_SAVETO = \"./xgame_pc\"\nRES_SERVER = [u\"http://192.168.1.120/resource|策划1 - 120\",\n u\"http://192.168.1.121/resource|策划2 - 121\", \n u\"http://192.168.1.122/resource|策划3 - 122\", \n u\"http://192.168.1.66:8080|内网测试服 - 66\"]\nLabel_Caption = \"xgame tool\"\nLabel_Config = u\"刷新列表\"\nLabel_Launch = u\"启动客户端\"\nLabel_MSGBOX = u\"注意\"\nLabel_OpenFolder = u\"打开文件夹\"\nLabel_AutoUpdate = u\"自动更新\"\nLabel_StopAutoUpdate = u\"停止更新\"\nLabel_Check = u\"检查版本更新\"\nLabel_IPChoice = u\"选择资源更新服务器\"\nLabel_VerChoice = u\"选择自动更新的版本\"\nSMB_ACCOUNT = \"joker\"#\"albertcheng\"#\"joker\" \nSMB_PASSWD = \"zhaoqing@2015\"#\"0506\"#\"zhaoqing@2015\"\nSMB_CLIENT = \"xgame-tool\"\nSMB_SERVER = \"FileServer\"\nSMB_DOMAIN = \"WORKGROUP\"\nSMB_HOST_IP = \"192.168.1.24\"\nSMB_ROOT = \"ShareFiles\"\noutlog_1 = \"samba connected failed.\"\noutlog_2 = u\"获取版本资源的路径清单失败。\"\noutlog_3 = u'目前资源服务器选择为: %s \\n服务器对应地址为 %s\\n'\noutlog_4 = u'目前版本选择为: %s\\n'\noutlog_5 = u\"参数配置功能未开放,有本事你提需求\\n\"\noutlog_6 = u\"准备启动游戏客户端...\\n\"\noutlog_7 = u'目前资源服务器选择为: %s \\n'\noutlog_8 = u'游戏版本选择为: %s \\n'\noutlog_9 = u\"客户端不存在,即将开始下载,下载后会自动启动游戏...\\n\"\noutlog_10 = u'切换工具的更新开关,然并卵,因未做\\n'\noutlog_11 = u\"选择更新版本为:\"\noutlog_12 = u\"查找版本失败,版本已经被删除或版本名异常\"\noutlog_13 = u\"有 [%s] 个文件等待下载 \\n\"\noutlog_14 = u\"用户暂停客户端的下载线程\\n\"\noutlog_15 = u\"正在下载文件 - %s\\n\"\noutlog_16 = u\"更新版本共计耗时: [%.2f] 秒\\n\" \noutlog_17 = u\"要下载的版本已经存在,是否继续下载?\"\noutlog_18 = u\"用户放弃了版本资源下载\\n\"\noutlog_19 = u\"版本下载完毕,是否启动客户端?\" \noutlog_20 = u\"列表加载完毕.\\n\"\noutlog_21 = u\"要打开的版本不存在,请先更新。\"\noutlog_22 = u\"samba连接已经中断,请重新启动工具或等待重连。\\n\"\noutlog_23 = u\"正在下载资源,请稍安勿躁。\"\noutlog_24 = u\"拉取资源清单成功。\\n\"\noutlog_25 = u\"修改资源服务器配置失败。\"\noutlog_26 = u\"下载文件 - %s 失败。\\n\"\noutlog_27 = u\"启动游戏失败,未发现可执行程序,请检查版本目录。\\n\" \nlog_msg_31 = u\"准备上传 [%s - %s] 到Ftp\" \nlog_msg_32 = u\"上传文件 [%s] 到Ftp成功\"\nlog_msg_33 = u\"上传完成,耗时%.2f秒\"\nlog_msg_34 = u\"所有文件上传完成,耗时%.2f秒\"","repo_name":"foreverckat/client_manager","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29973776493","text":"def soma_coisas(*args):\n bolso = 0\n for arg in args:\n bolso += arg\n return bolso\n\n\ndef soma_coisa(arg1, arg2):\n resultado = arg1 + arg2\n return resultado\n\nprint(soma_coisa(2, 3))\n\ndef entende_o_objeto(**kwargs):\n for key, value in kwargs.items():\n print(key, value)\n\nentende_o_objeto(nome='fellipe', idade=29)\n\n\ndef alguma_coisa(**kwargs):\n referencias = {\n 'nome':'fellipe',\n 'idade': 29,\n 'endereço': 'mundo',\n}\n\n 
referencias.update(kwargs)\n print(referencias)\n\nalguma_coisa(nome='alguem', idade=2, endereço='casa')\n","repo_name":"pipibodock/Meus_exercicios","sub_path":"args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72065991945","text":"from django.urls import path\n\nfrom users import views\n\napp_name = 'users'\n\nurlpatterns = [\n path('login/', views.LoginFormView.as_view(), name='login'),\n path('register/', views.RegisterFormView.as_view(), name='register'),\n path('logout/', views.LogoutView.as_view(), name='logout'),\n]\n","repo_name":"ZakonGyka/Tree_of_departments","sub_path":"tree_structure/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35187367541","text":"#\n# @lc app=leetcode.cn id=68 lang=python3\n#\n# [68] 文本左右对齐\n#\n\n# @lc code=start\nfrom typing import List\nclass Solution:\n def valid(self,line:List[str],word:str)->bool:\n return sum([len(i) for i in line])+len(line)+len(word)<=self.maxWidth\n def fillspace(self,line:List[str])->str:\n if len(line)==1:return line[0]+\" \"*(self.maxWidth-len(line[0]))\n total_spaces=self.maxWidth-sum([len(i) for i in line])\n places=len(line)-1\n base=total_spaces//places\n extra=total_spaces%places\n ret=[]\n for i in range(places):\n ret.append(line[i])\n ret.append(\" \"*(base+(i<extra)))\n ret.append(line[-1])\n return \"\".join(ret)\n def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:\n paper=[[]]\n self.maxWidth=maxWidth\n for s in words:\n if self.valid(paper[-1],s):\n paper[-1].append(s)\n else:\n paper.append([s])\n for i in range(len(paper)-1):\n paper[i]=self.fillspace(paper[i])\n paper[-1]=\" \".join(paper[-1])\n paper[-1]+=\" \"*(maxWidth-len(paper[-1]))\n return paper\n# @lc code=end\n\nprint(Solution().fullJustify([\"Science\",\"is\",\"what\",\"we\",\"understand\",\"well\",\"enough\",\"to\",\"explain\",\"to\",\"a\",\"computer.\",\"Art\",\"is\",\"everything\",\"else\",\"we\",\"do\"],20))","repo_name":"HellOwhatAs/Leetcode","sub_path":"68.文本左右对齐.py","file_name":"68.文本左右对齐.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6597539311","text":"\n\nimport wx\n\nfrom ..settings import SettingsManager\n\n\n\nclass Slider(wx.Panel):\n \n \n def __init__(self, parent, id, widget, key, dtype='float', digits=2):\n wx.Panel.__init__(self, parent, id)\n self.widget = widget\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n \n self.dtype = dtype\n self.btnReset = wx.Button(self, wx.ID_ANY, label='X', style=wx.BU_EXACTFIT)\n self.Bind(wx.EVT_BUTTON, self._on_btn_reset, self.btnReset)\n \n \n if dtype == 'float':\n self.spinner = wx.SpinCtrlDouble(self, wx.ID_ANY, size=(75, -1), style=wx.SP_ARROW_KEYS|wx.TE_PROCESS_ENTER)\n self.spinner.SetDigits(digits)\n self.Bind(wx.EVT_SPINCTRLDOUBLE, self._on_spinner_updated, self.spinner)\n self.scale = digits*10.0\n self._to_spinner_value = self._to_float\n else:\n self.spinner = wx.SpinCtrl(self, wx.ID_ANY, size=(75, -1), style=wx.SP_ARROW_KEYS|wx.TE_PROCESS_ENTER)\n self.Bind(wx.EVT_SPINCTRL, self._on_spinner_updated, self.spinner)\n self.scale = 1.0\n self._to_spinner_value = self._to_int\n \n self.Bind(wx.EVT_TEXT_ENTER, self._on_spinner_updated, self.spinner)\n \n self.slider = wx.Slider(self, wx.ID_ANY)\n \n 
self.Bind(wx.EVT_SLIDER, self._on_slider_updated, self.slider)\n \n sizer.Add(self.btnReset, 0, wx.RIGHT | wx.EXPAND, 3)\n sizer.Add(self.spinner, 0, wx.ALIGN_CENTER_VERTICAL)\n sizer.Add(self.slider, 1, wx.LEFT | wx.EXPAND, 3)\n \n self.SetSizer(sizer)\n \n SettingsManager.register_widget(key, self)\n\n\n def SetMin(self, v):\n self.spinner.SetMin(self._to_spinner_value(v))\n self.slider.SetMin(self._to_int(v))\n \n \n def SetMax(self, v):\n self.spinner.SetMax(self._to_spinner_value(v))\n self.slider.SetMax(self._to_int(v))\n \n \n def SetRange(self, min, max):\n self.spinner.SetRange(self._to_spinner_value(min), self._to_spinner_value(max))\n self.slider.SetRange(self._to_int(min), self._to_int(max))\n \n \n def SetIncrement(self, v):\n if isinstance(self.spinner, wx.SpinCtrlDouble):\n self.spinner.SetIncrement(self._to_spinner_value(v))\n \n \n def SetValue(self, v):\n self.spinner.SetValue(self._to_spinner_value(v)) \n self.slider.SetValue(self._to_int(v))\n \n \n def GetValue(self):\n if self.dtype == 'float':\n return self.spinner.GetValue() \n else:\n return int(self.slider.GetValue())\n \n \n def _on_spinner_updated(self, event):\n if hasattr(event, 'Value'):\n v = event.Value \n if hasattr(event, 'Int'):\n v = event.Int \n else:\n v = float(event.String)\n \n self.slider.SetValue(self._to_int(v))\n self._on_event()\n\n \n def _on_slider_updated(self, event):\n v = event.Int \n v = self._to_spinner_value(v)\n self.spinner.SetValue(v) \n self._on_event()\n \n \n def _on_event(self):\n class E(object):\n Value = None \n EventObject = None \n \n e = E()\n e.Value = self.GetValue()\n e.EventObject = self\n self.widget.on_setting_updated(e)\n \n\n def _on_btn_reset(self, event):\n self.SetValue(self.default_value)\n self._on_event()\n\n\n def _to_int(self, v):\n if isinstance(v, int):\n return v \n \n return int(v * self.scale)\n \n \n def _to_float(self, v):\n if isinstance(v, float):\n return v \n \n return v / self.scale\n ","repo_name":"SiliconLabs/mltk","sub_path":"mltk/utils/audio_visualizer/gui/slider.py","file_name":"slider.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"81"} +{"seq_id":"72071830985","text":"class Solution:\n def isPalindrome(self, x: int) -> bool:\n if x < 0 : return False\n elif x == 0 : return True\n elif x % 10 == 0 : return False\n \n reco = 0\n xx = x\n while xx > reco:\n reco = reco * 10 + xx % 10\n xx = xx // 10\n\n return xx == reco or xx == reco // 10 \n","repo_name":"CastleWhite/LeetCodeProblems","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17881362833","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.spider import Spider\nimport logging, json, math, datetime, re\nfrom scrapy.selector import Selector \nfrom works.items import WorksItem\nfrom bs4 import BeautifulSoup\n\nclass ZhilianSpider(scrapy.Spider):\n name = \"zhilian\"\n download_delay = 1\n allowed_domains = ['zhaopin.com', 'jobs.zhaopin.com', 'sou.zhaopin.com']\n start_urls = ['http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7&kw=python&sm=0&isfilter=1&p=1']\n\n def parse(self, response):\n logging.debug('---------------------------------------------------------------------------------------------------')\n rep = Selector(response)\n urls = rep.xpath('//td[@class=\"zwmc\"]//a[@style=\"font-weight: bold\"]/@href').extract()\n for url 
in urls:\n yield scrapy.Request(url=url, callback=self.get_item, meta={'url': url})\n next_page = rep.xpath('//a[@class=\"next-page\"]')\n if len(next_page) != 0:\n url = next_page[0].xpath('@href').extract()[0]\n yield scrapy.Request(url=url, callback=self.parse)\n\n def get_item(self, response):\n item = WorksItem()\n soup = BeautifulSoup(response.body, 'lxml')\n fl = soup.find('div', {'class': 'fl'})\n item['positionName'] = fl.h1.string.strip()\n item['companyFullName'] = fl.h2.get_text().strip()\n item['companyLabelList'] = '/'.join([span.string.strip() for span in fl.select('div.welfare-tab-box span')])\n info = soup.find_all('ul', {'class': 'terminal-ul'})\n ss1 = info[1].find_all('strong')\n ss1_need = ['companySize', 'company_type', 'industryField', 'website', 'address']\n if len(ss1) == 4: del ss1_need[3]\n for i in range(len(ss1_need)):\n item[ss1_need[i]] = ss1[i].get_text().strip()\n ss0 = info[0].find_all('strong')\n ss0_need = ['salary', 'city', 'createTime', 'jobNature', 'workYear', 'education']\n for i in range(len(ss0_need)):\n if i == 2:\n create_date = ss0[i].get_text().strip()\n if create_date == u'昨天':\n item[ss0_need[i]] = str(datetime.date.today() - datetime.timedelta(days=1))\n elif create_date == u'今天' or create_date == u'刚刚':\n item[ss0_need[i]] = str(datetime.date.today())\n elif create_date == u'前天':\n item[ss0_need[i]] = str(datetime.date.today() - datetime.timedelta(days=2))\n elif re.search('^\\d{4}-\\d{2}-\\d{2}$', create_date): # re.search returns a Match or None, not an index\n item[ss0_need[i]] = create_date\n else:\n item[ss0_need[i]] = '1900-01-01'\n else:\n item[ss0_need[i]] = ss0[i].get_text().strip()\n com_detail = soup.find('div', {'class': 'tab-cont-box'}).select('div.tab-inner-cont')\n item['detail'] = ''.join([re.sub('[\\' ]', '`', p.get_text().strip()) for p in com_detail[0].select('p')])\n item['company_info'] = re.sub('[\\' ]', '`', com_detail[1].select('p')[0].get_text().strip())\n return item\n","repo_name":"zhenxianluo/help-you-find-working","sub_path":"works/spiders/zhilian.py","file_name":"zhilian.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
multi_objective(self):\n return True\n\n\nclass SamplingTest(unittest.TestCase):\n \"\"\"Test `pg.sample` with the default tuning backend.\"\"\"\n\n def test_sample_with_set_metadata(self):\n feedbacks = []\n algo = geno.Random(seed=1)\n for example, f in pg_sample(\n symbolic.Dict(x=hyper.oneof([5, 6, 7])),\n algorithm=algo,\n num_examples=10,\n name='my_search',):\n self.assertIsNone(f.checkpoint_to_warm_start_from)\n\n f.set_metadata('example', example)\n self.assertEqual(f.get_metadata('example'), example)\n\n f.set_metadata('global_key', 1, per_trial=False)\n self.assertEqual(f.get_metadata('global_key', per_trial=False), 1)\n\n f.add_link('filepath', f'http://path/to/file_{example.x}')\n with f.skip_on_exceptions([ValueError]):\n if f.id == 5:\n raise ValueError('bad trial')\n f(example.x)\n feedbacks.append(f)\n\n self.assertEqual(algo.num_proposals, 10)\n self.assertEqual(len(feedbacks), 10)\n self.assertEqual([c.id for c in feedbacks], list(range(1, 11)))\n\n # Test `poll_result`.\n result = pg_poll_result('my_search')\n self.assertTrue(result.is_active)\n self.assertIsNotNone(result.best_trial)\n self.assertEqual(result.best_trial.final_measurement.reward, 7.0)\n self.assertEqual(result.best_trial.dna, geno.DNA.parse([2]))\n self.assertEqual(result.best_trial.metadata.example, symbolic.Dict(x=7))\n self.assertEqual(\n result.best_trial.related_links.filepath, 'http://path/to/file_7')\n\n self.assertEqual(result.metadata['global_key'], 1)\n self.assertEqual(len(result.trials), 10)\n # TODO(daiyip): Move this test to 'local_backend_test.py'\n self.assertEqual(\n str(result),\n inspect.cleandoc('''{\n 'name': 'my_search',\n 'status': {\n 'COMPLETED': '10/10'\n },\n 'infeasible': '1/10',\n 'best_trial': {\n 'id': 2,\n 'reward': 7.0,\n 'step': 0,\n 'dna': 'DNA(2)'\n }\n }'''))\n\n def test_sample_with_skip_on_exceptions(self):\n search_space = symbolic.Dict(x=hyper.oneof(range(10)))\n algo = geno.Random(seed=1)\n sample = pg_sample(search_space, algo)\n _, f = next(sample)\n\n with f.skip_on_exceptions((ValueError,)):\n # should succeed.\n f(0)\n self.assertEqual(algo.num_proposals, 1)\n self.assertEqual(algo.num_feedbacks, 1)\n\n _, f = next(sample)\n with f.skip_on_exceptions((ValueError,)):\n # should skip.\n raise ValueError\n self.assertEqual(algo.num_proposals, 2)\n self.assertEqual(algo.num_feedbacks, 1)\n\n _, f = next(sample)\n with f.skip_on_exceptions((ValueError,)):\n # should skip.\n raise ValueError('abc')\n self.assertEqual(algo.num_proposals, 3)\n self.assertEqual(algo.num_feedbacks, 1)\n\n _, f = next(sample)\n with f.skip_on_exceptions((Exception,)):\n # should skip.\n raise ValueError('abc')\n self.assertEqual(algo.num_proposals, 4)\n self.assertEqual(algo.num_feedbacks, 1)\n\n _, f = next(sample)\n with f.skip_on_exceptions(((ValueError, '.*a'),)):\n # should skip.\n raise ValueError('abc')\n self.assertEqual(algo.num_proposals, 5)\n self.assertEqual(algo.num_feedbacks, 1)\n\n _, f = next(sample)\n with f.skip_on_exceptions(((Exception, '.*a'),)):\n # should skip.\n raise ValueError('abc')\n self.assertEqual(algo.num_proposals, 6)\n self.assertEqual(algo.num_feedbacks, 1)\n\n _, f = next(sample)\n with self.assertRaisesRegex(\n ValueError, 'bcd'):\n with f.skip_on_exceptions(((Exception, '.*a'),)):\n # should skip.\n raise ValueError('bcd')\n self.assertEqual(algo.num_proposals, 7)\n self.assertEqual(algo.num_feedbacks, 1)\n\n _, f = next(sample)\n with f.skip_on_exceptions(((ValueError, '.*a'),\n (ValueError, '.*b'),\n KeyError)):\n # should skip.\n raise 
ValueError('bcd')\n self.assertEqual(algo.num_proposals, 7)\n self.assertEqual(algo.num_feedbacks, 1)\n\n _, f = next(sample)\n with f.skip_on_exceptions(((ValueError, '.*a'),\n (ValueError, '.*b'),\n KeyError)):\n # should skip.\n raise KeyError\n self.assertEqual(algo.num_proposals, 8)\n self.assertEqual(algo.num_feedbacks, 1)\n\n def test_sample_with_race_condition(self):\n _, f = next(pg_sample(hyper.oneof([1, 2, 3]), geno.Random(seed=1)))\n\n f(1)\n with self.assertRaisesRegex(\n protocols.RaceConditionError,\n '.*Measurements can only be added to PENDING trials.*'):\n f.add_measurement(0.1)\n\n with f.ignore_race_condition():\n f.add_measurement(0.1)\n\n def test_sample_with_dynamic_evaluation(self):\n def fun():\n return hyper.oneof([1, 2, 3]) + hyper.oneof([3, 4, 5])\n\n for example, f in pg_sample(\n hyper.trace(fun),\n geno.Sweeping(), num_examples=6, name='define-by-run-search'):\n with example():\n f(fun())\n\n # Test `poll_result`.\n result = pg_poll_result('define-by-run-search')\n rewards = [t.final_measurement.reward for t in result.trials]\n self.assertEqual(rewards, [4., 5., 6., 5., 6., 7.])\n\n def test_sample_with_dna_spec(self):\n dna_spec = geno.space([\n geno.oneof([geno.constant(), geno.constant(), geno.constant()]),\n ])\n for dna, f in pg_sample(\n dna_spec, geno.Sweeping(), name='sample-dnaspec'):\n f(dna.value)\n\n # Test `poll_result`.\n result = pg_poll_result('sample-dnaspec')\n rewards = [t.final_measurement.reward for t in result.trials]\n self.assertEqual(rewards, [0, 1, 2])\n\n def test_sample_with_continuation_and_end_loop(self):\n hyper_value = symbolic.Dict(x=hyper.oneof([1, 2, 3]))\n for _, feedback in pg_sample(\n hyper_value,\n algorithm=geno.Random(seed=1),\n name='my_search2'):\n # Always invoke the feedback function in order to advance\n # to the next trail.\n feedback(0.)\n if feedback.id == 2:\n # We break without ending the loop\n break\n\n result = pg_poll_result('my_search2')\n self.assertTrue(result.is_active)\n self.assertEqual(len(result.trials), 2)\n\n sample1 = pg_sample(\n hyper_value,\n algorithm=geno.Random(seed=1),\n name='my_search2')\n\n # Make sure sampling within the same worker get the same trial IDs before\n # feedback.\n _, c1 = next(sample1)\n self.assertEqual(c1.id, 3)\n self.assertEqual(c1.get_trial().id, 3)\n\n _, c1b = next(sample1)\n self.assertEqual(c1b.id, 3)\n c1(0.)\n\n # Make sure sampling within the same worker get different trial IDs after\n # previous trial is done.\n _, c1c = next(sample1)\n self.assertEqual(c1c.id, 4)\n\n # Make sure after `end_loop`, sampling will raise StopIteration error.\n # Also the study is no longer active.\n c1c.end_loop()\n with self.assertRaises(StopIteration):\n next(sample1)\n self.assertFalse(result.is_active)\n\n def test_sample_with_single_objective(self):\n algo = DummySingleObjectiveAlgorithm()\n _, f = next(pg_sample(\n hyper.oneof([1, 2]), algo,\n metrics_to_optimize=['reward']))\n\n with self.assertRaisesRegex(\n ValueError,\n '\\'reward\\' must be provided as it is a goal to optimize'):\n f()\n f(1.0)\n self.assertEqual(algo.rewards, [1.0])\n\n algo = DummySingleObjectiveAlgorithm()\n _, f = next(pg_sample(\n hyper.oneof([1, 2]), algo,\n metrics_to_optimize=['accuracy']))\n\n with self.assertRaisesRegex(\n ValueError,\n 'Metric .* must be provided as it is a goal to optimize.'):\n f.add_measurement(0.0)\n\n with self.assertRaisesRegex(\n ValueError,\n '\\'reward\\' .* is provided while it is not a goal to optimize'):\n f.add_measurement(0.0, metrics={'accuracy': 2.0})\n\n 
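# with metrics_to_optimize=['accuracy'], the reward argument is omitted and only\n # the named metric is supplied; the algorithm receives that value as its reward\n 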
f(metrics={'accuracy': 2.0})\n self.assertEqual(algo.rewards, [2.0])\n\n with self.assertRaisesRegex(\n ValueError,\n '\\'metrics_to_optimize\\' should include only 1 metric as '\n 'multi-objective optimization is not supported'):\n next(pg_sample(\n hyper.oneof([1, 2]), DummySingleObjectiveAlgorithm(),\n metrics_to_optimize=['reward', 'accuracy', 'latency']))\n\n def test_sample_with_multi_objective(self):\n algo = DummyMultiObjectiveAlgorithm()\n it = pg_sample(\n hyper.oneof([1, 2]), algo,\n metrics_to_optimize=['reward', 'accuracy', 'latency'])\n _, f = next(it)\n with self.assertRaisesRegex(\n ValueError,\n '\\'reward\\' must be provided as it is a goal to optimize'):\n f.add_measurement(metrics={'accuracy': 0.9, 'latency': 0.5})\n f(0., metrics={'accuracy': 0.9, 'latency': 0.5})\n\n self.assertEqual(algo.rewards, [(0., 0.9, 0.5)])\n _, f = next(it)\n\n f((0.1, 0.2, 0.3))\n self.assertEqual(algo.rewards, [(0., 0.9, 0.5), (0.1, 0.2, 0.3)])\n self.assertEqual(f.get_trial().final_measurement.metrics, {\n 'accuracy': 0.2,\n 'latency': 0.3\n })\n\n _, f = next(it)\n with self.assertRaisesRegex(\n ValueError,\n 'The number of items in the reward .* does not match '):\n f((0.1, 0.2))\n\n with self.assertRaisesRegex(\n ValueError,\n 'The value for metric .* is provided from both .* different values'):\n f((0.1, 0.2, 0.3), metrics={'accuracy': 0.5})\n\n algo = DummyMultiObjectiveAlgorithm()\n _, f = next(pg_sample(hyper.oneof([1, 2]), algo))\n f(1.)\n self.assertEqual(algo.rewards, [(1.,)])\n\n def test_sample_with_controller_evaluated_rewards(self):\n\n @symbolic.members([\n ('num_objectives', pg_typing.Int(min_value=1))\n ])\n class MaybeControllerEvaluated(geno.DNAGenerator):\n\n def _setup(self):\n self._dna = None\n\n @property\n def multi_objective(self):\n return self.num_objectives > 1\n\n def _propose(self):\n self._dna = self.dna_spec.next_dna(self._dna)\n if self.num_proposals % 2 == 1:\n if self.multi_objective:\n reward = tuple([0.] 
* self.num_objectives)\n else:\n reward = 0.\n self._dna.set_metadata('reward', reward)\n return self._dna\n\n algo = MaybeControllerEvaluated(1)\n client_evaluated = []\n for x, f in pg_sample(hyper.oneof(range(100)), algo, 10):\n f(x)\n client_evaluated.append(f.dna)\n\n self.assertEqual(\n client_evaluated,\n [geno.DNA(0), geno.DNA(2), geno.DNA(4), geno.DNA(6), geno.DNA(8)])\n self.assertEqual(algo.num_proposals, 10)\n self.assertEqual(algo.num_feedbacks, 10)\n\n algo = MaybeControllerEvaluated(2)\n client_evaluated = []\n for x, f in pg_sample(\n hyper.oneof(range(100)), algo, 10, metrics_to_optimize=['a', 'b']):\n f(None, metrics=dict(a=0., b=0.))\n client_evaluated.append(f.dna)\n\n self.assertEqual(\n client_evaluated,\n [geno.DNA(0), geno.DNA(2), geno.DNA(4), geno.DNA(6), geno.DNA(8)])\n self.assertEqual(algo.num_proposals, 10)\n self.assertEqual(algo.num_feedbacks, 10)\n\n it = pg_sample(hyper.oneof(range(100)), algo, 10)\n # The first call will get a DNA to be evaluated at the client side.\n # It should pass\n x, f = next(it)\n f(0.0)\n\n # The second call will poll a DNA evaluated at the controller side.\n # Since the number of reward items is 2 while metrics_to_optimize is 1,\n # there will be an error.\n with self.assertRaisesRegex(\n ValueError, 'The number of items in the reward .* does not match'):\n next(it)\n\n def test_sample_with_early_stopping(self):\n stopped_trial_steps = []\n early_stopping_policy = DummyEarlyStoppingPolicy()\n for _, f in pg_sample(\n symbolic.Dict(x=hyper.oneof([1, 2])),\n geno.Random(seed=1), 10,\n early_stopping_policy,\n name='early_stopping'):\n skipped = False\n for step in [0, 1, 2]:\n if f.should_stop_early():\n stopped_trial_steps.append((f.id, step))\n skipped = True\n break\n else:\n f.add_measurement(0., step=step)\n if skipped:\n f.skip()\n else:\n f.done()\n\n self.assertEqual(stopped_trial_steps, [(2, 2), (4, 2), (8, 2)])\n\n result = pg_poll_result('early_stopping')\n for t in result.trials:\n if t.id in [2, 4, 8]:\n self.assertTrue(t.infeasible)\n self.assertEqual(len(t.measurements), 2)\n self.assertEqual(\n t.final_measurement,\n protocols.Measurement(step=0, reward=0.0, elapse_secs=0.0))\n else:\n self.assertFalse(t.infeasible)\n self.assertEqual(t.final_measurement.step, 2)\n self.assertEqual(len(t.measurements), 3)\n\n def test_sample_with_concurrent_workers(self):\n threads_trial_ids = []\n def create_worker_func(study_name, num_examples=None, group_id=None):\n hyper_value = symbolic.Dict(x=hyper.oneof([1, 2, 3]))\n def worker_func():\n trial_ids = []\n threads_trial_ids.append(trial_ids)\n for _, feedback in pg_sample(\n hyper_value,\n algorithm=geno.Random(seed=1),\n group=group_id,\n num_examples=num_examples,\n name=study_name):\n trial_ids.append(feedback.id)\n feedback(0.)\n if feedback.id == 7:\n feedback.end_loop()\n time.sleep(0.1)\n return worker_func\n\n # Test multiple worker thread with different group ID.\n num_workers = 3\n worker_func = create_worker_func('mt_search_different_group', 10)\n threads = [threading.Thread(target=worker_func) for _ in range(num_workers)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n self.assertEqual(len(threads_trial_ids), 3)\n all_trial_ids = []\n for ids_per_thread in threads_trial_ids:\n all_trial_ids.extend(ids_per_thread)\n\n # Make sure different threads get different trials.\n self.assertCountEqual(all_trial_ids, set(all_trial_ids))\n # NOTE(daiyip): when worker with trial#7 trigger end_loop, other 2 threads\n # may already move forward to #8 and #9. 
But it should not go beyond that\n # point because every iteration sleeps for 100 ms.\n self.assertIn(len(all_trial_ids), list(range(7, 7 + num_workers)))\n\n def test_bad_sampling(self):\n _, f = next(pg_sample(\n symbolic.Dict(x=hyper.oneof([1, 2])),\n geno.Random(seed=1), 1))\n with self.assertRaisesRegex(\n ValueError, 'At least one measurement should be added for trial'):\n f.done()\n\n with self.assertRaisesRegex(\n ValueError, '\\'space\\' is a constant value'):\n next(pg_sample(1, geno.Random(seed=1)))\n\n with self.assertRaisesRegex(\n ValueError, 'Backend .* does not exist.'):\n next(pg_sample(\n hyper.oneof([1, 2]), geno.Random(seed=1),\n backend='non-exist'))\n\n # Using the sample algorithm to optimize different search spaces will\n # trigger a value error.\n algo = geno.Random(seed=1)\n early_stopping_policy = DummyEarlyStoppingPolicy()\n next(pg_sample(\n hyper.oneof([1, 2]), algo, 1, early_stopping_policy))\n with self.assertRaisesRegex(\n ValueError, '.* has been set up with a different DNASpec'):\n next(pg_sample(symbolic.Dict(x=hyper.oneof([3, 4])), algo))\n\n algo = geno.Random(seed=1)\n with self.assertRaisesRegex(\n ValueError, '.* has been set up with a different DNASpec'):\n next(pg_sample(\n symbolic.Dict(x=hyper.oneof([1, 2])),\n algo, 1, early_stopping_policy))\n\n with self.assertRaisesRegex(\n ValueError, 'Result .* does not exist.'):\n pg_poll_result('non-exist-search')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"google/pyglove","sub_path":"pyglove/core/tuning/sample_test.py","file_name":"sample_test.py","file_ext":"py","file_size_in_byte":16938,"program_lang":"python","lang":"en","doc_type":"code","stars":306,"dataset":"github-code","pt":"81"}
{"seq_id":"1896272040","text":"# -*- coding: UTF-8 -*-\nimport time\nimport csv\nimport re\n\n\ndict_data = {}\ncount = 0\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \", open ecdict.csv\")\n\nwith open('ecdict.csv', 'r', encoding='utf-8') as ec:\n f_ecdict = csv.reader(ec)\n headers = next(f_ecdict)\n for row in f_ecdict:\n # If you need to keep the newlines in the definitions, comment out the following line\n row[3] = row[3].replace(u'\\\\n', ' ')\n # NOTE: convert the dictionary keys to lowercase\n dict_data[row[0].lower()] = row[3]\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \", got dict_data\")\n\nwith open('shhard.txt', 'r', encoding='utf-8') as w:\n word_list = w.read().split('\\n')\n\nwith open('shorthistory.txt', 'r', encoding='utf-8') as ori:\n all_text = ori.read()\n\n# Convert lemmas.txt into two dictionaries:\n# In the first, the key is the first column of each line in lemmas.txt, and the value is the whole line stored as a list\n# In the second, the key is each word in lemmas.txt, and the value is the single word in the first column of that word's line\n# NOTE: the contents of both dictionaries are lowercase\n\nlemmas = {}\nre_lemmas = {}\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \", open lemmas.txt\")\n\nwith open('lemmas.txt', 'r', encoding='utf-8') as lemmas_file:\n temp_lemmas = lemmas_file.readlines()\n for line in temp_lemmas:\n parts = line.split()\n lemmas[parts[0].lower()] = []\n for i in range(0, len(parts)):\n lemmas[parts[0].lower()].append(parts[i].lower())\n re_lemmas[parts[i].lower()] = parts[0].lower()\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \", got lemmas and re_lemmas\")\n\n# Store the words in word_list, all their other forms, and their Chinese meanings in all_words_trans\n\nall_words_trans = {}\nfor w in word_list:\n if w:\n w_lemmas = lemmas.get(w)\n for w_le in w_lemmas:\n a_tran = dict_data.get(w_le.lower())\n if not a_tran:\n org_w = re_lemmas.get(w_le.lower())\n if org_w:\n org_w_tran = dict_data.get(org_w)\n if org_w_tran:\n a_tran = org_w_tran\n else:\n print('Error, \"%s\" is 
not in dict_data' % org_w)\n else:\n print('Error, \"%s\" is not in re_lemmas' % w_le)\n if a_tran:\n all_words_trans[w_le] = a_tran\n else:\n all_words_trans[w_le] = \"No translation\"\nprint(all_words_trans)\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \", begin to replace\")\n\nfor w, tran in all_words_trans.items():\n # Besides doing the replacement, wrap the new word and its translation in HTML formatting\n w_tran = '<font color=red>'+ w + '</font>'+ '(<font color=blue size=-1>' + tran + '</font>)'\n # To avoid replacing the wrong word, the replaced word must be preceded and followed by one of:\n # a space or ,:.'?!@;()\\r\\n\n # NOTE: if a word appears two or more times in a row, only the first occurrence is replaced\n\n pattern = re.compile(r'([ ,:\\'\\.\\?!@;(])%s(([ ,:\\'\\.\\?!@;)])|(\\r)|(\\n))' % w)\n all_text = pattern.sub(r'\\1%s\\2' % w_tran, all_text)\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \", end\" )\n\nall_text = all_text.replace('\\n', '<br>')\n\nwith open('trans_result.html', 'w', encoding='utf-8') as fout:\n fout.write(all_text)\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \", wrote to file\")","repo_name":"mahavivo/vocabulary","sub_path":"vivo/auto_trans.py","file_name":"auto_trans.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"81"}
{"seq_id":"26837107374","text":"\nfrom . import BaseModel, db, User, CaseGroup, TestCaseScene, RequestHeaders\n\n\nclass TestCases(BaseModel, db.Model):\n __tablename__ = 'testcases'\n url = db.Column(db.String(300), nullable=False)\n data = db.Column(db.TEXT)\n regist_variable = db.Column(db.String(500))\n regular = db.Column(db.TEXT)\n method = db.Column(db.String(10), nullable=False)\n group_id = db.Column(db.Integer, db.ForeignKey(CaseGroup.id))\n request_headers_id = db.Column(db.Integer, db.ForeignKey(RequestHeaders.id))\n testcase_scene_id = db.Column(db.Integer, db.ForeignKey(TestCaseScene.id))\n hope_result = db.Column(db.String(200))\n is_model = db.Column(db.Integer)\n user_id = db.Column(db.Integer, db.ForeignKey(User.id))\n old_sql = db.Column(db.String(200))\n new_sql = db.Column(db.String(200))\n old_sql_regist_variable = db.Column(db.String(200))\n new_sql_regist_variable = db.Column(db.String(200))\n old_sql_hope_result = db.Column(db.String(200))\n new_sql_hope_result = db.Column(db.String(200))\n old_sql_id = db.Column(db.Integer)\n new_sql_id = db.Column(db.Integer)\n\n def __init__(self, name, url, data, regist_variable, regular, method, group_id=1,\n request_headers_id=1, testcase_scene_id=None, hope_result='', is_model=0, user_id=1,\n old_sql='', new_sql='', old_sql_regist_variable='', new_sql_regist_variable='',\n old_sql_hope_result='', new_sql_hope_result='', old_sql_id=None, new_sql_id=None):\n super().__init__(name)\n self.regist_variable = regist_variable\n self.regular = regular\n self.url = url\n self.data = data\n self.method = method\n self.group_id = group_id\n self.request_headers_id = request_headers_id\n self.hope_result = hope_result\n self.testcase_scene_id = testcase_scene_id\n self.is_model = is_model\n self.user_id = user_id\n self.old_sql = old_sql\n self.new_sql = new_sql\n self.old_sql_regist_variable = old_sql_regist_variable\n self.new_sql_regist_variable = new_sql_regist_variable\n self.old_sql_hope_result = old_sql_hope_result\n self.new_sql_hope_result = new_sql_hope_result\n self.old_sql_id = old_sql_id\n self.new_sql_id = new_sql_id\n\n def __repr__(self):\n return \"<TestCase:%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s >\" % (\n self.id, self.name, self.url, self.data, self.method,\n self.group_id, self.request_headers_id, 
self.timestamp, self.regist_variable,\n self.regular, self.hope_result, self.testcase_scene_id, self.is_model, self.user_id)\n\n def to_dict(self, wait=None):\n if wait is None:\n wait = {}\n return dict(id=self.id, name=self.name, url=self.url, header_id=self.request_headers_id,\n data=self.data, regist_variable=self.regist_variable, method=self.method, group_id=self.group_id,\n regular=self.regular, hope_result=self.hope_result, testcase_scene_id=self.testcase_scene_id,\n is_model=self.is_model, user_id=self.user_id, old_sql=self.old_sql, new_sql=self.new_sql,\n old_sql_regist_variable=self.old_sql_regist_variable, new_sql_regist_variable=\n self.new_sql_regist_variable, old_sql_hope_result=self.old_sql_hope_result, new_sql_hope_result=\n self.new_sql_hope_result, old_sql_id=self.old_sql_id, new_sql_id=self.new_sql_id, wait=wait)\n","repo_name":"yangleiqing0/MyVueFlask","sub_path":"modles/testcase.py","file_name":"testcase.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"41262729857","text":"# -*- coding: utf-8 -*-\n# @Author: WuLC\n# @Date: 2017-09-06 12:47:58\n# @Last Modified by: WuLC\n# @Last Modified time: 2017-09-06 23:19:43\n\n\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\n# recursive \n# duplicate result for some cases like [2,2,2,3,null,3,null]\n# Definition for a binary tree node.\n\nclass Solution(object):\n \n def __init__(self):\n self.result = []\n \n def findDuplicateSubtrees(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[TreeNode]\n \"\"\"\n self.result = []\n if root != None:\n self.helper(root.left, root.right)\n return self.result\n \n def helper(self, r1, r2):\n if r1 == None or r2 == None:\n return\n if r1.val == r2.val and self.isSame(r1, r2):\n self.result.append(r1)\n self.helper(r1, r2.left)\n self.helper(r1, r2.right)\n self.helper(r2, r1.left)\n self.helper(r2, r1.right)\n \n def isSame(self, r1, r2):\n if r1 == None and r2 == None:\n return True\n elif r1 == None or r2 == None:\n return False\n else:\n # compare the subtrees recursively by value and structure,\n # not by node identity\n return r1.val == r2.val and self.isSame(r1.left, r2.left) and self.isSame(r1.right, r2.right)\n \n# use a string to represent a tree\n# the string is obtained by preorder traversal of the tree\nclass Solution(object):\n def findDuplicateSubtrees(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[TreeNode]\n \"\"\"\n result = []\n counter = {}\n self.preorder(root, counter, result)\n return result\n \n def preorder(self, root, counter, result):\n if root == None:\n return '#'\n curr = str(root.val) + ',' + self.preorder(root.left, counter, result) + ',' + self.preorder(root.right, counter, result)\n counter.setdefault(curr, 0)\n counter[curr] += 1\n if counter[curr] == 2:\n result.append(root)\n return curr","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/652. 
Find Duplicate Subtrees.py","file_name":"652. Find Duplicate Subtrees.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"}
{"seq_id":"38848306985","text":"import pandas as pd\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport logging\nimport time\nimport numpy as np\n\nfrom ScrapingTool.Generic.connection_status_code import get_response_code\n\n\ndef get_random_ua():\n random_ua = ''\n ua_file = 'ScrapingTool/Generic/files/ua_file.txt'\n try:\n with open(ua_file) as f:\n lines = f.readlines()\n if len(lines) > 0:\n prng = np.random.RandomState()\n index = prng.permutation(len(lines) - 1)\n idx = np.asarray(index, dtype=int)[0]\n random_ua = lines[int(idx)]\n random_ua = random_ua.replace('\\n', ' ').replace('\\r', '')\n except Exception as ex:\n print('Exception in random_ua')\n print(str(ex))\n finally:\n return random_ua\n\ndef parse(url):\n \"\"\"\n Fetch the given URL and parse it with BeautifulSoup,\n falling back to the Google web cache when the direct request fails.\n :return: soup containing the parsed html data, or None if the URL is not accessible\n \"\"\"\n soup = None\n try:\n delays = [7, 4, 6, 2, 10, 19]\n delay = np.random.choice(delays)\n time.sleep(delay)\n user_agent = get_random_ua()\n headers = {\n 'user-agent': user_agent,\n 'referrer': 'https://google.com',\n }\n http_response = requests.get(url,headers=headers)\n soup = BeautifulSoup(http_response.content, \"html.parser\")\n http_response.close()\n except Exception as ex:\n print(str(ex))\n print(\"fetching from google web cache\")\n user_agent = get_random_ua()\n headers = {\n 'user-agent': user_agent,\n 'referrer': 'https://google.com',\n }\n cache_url = 'http://webcache.googleusercontent.com/search?q=cache:'+url\n http_response = requests.get(cache_url,headers=headers)\n soup = BeautifulSoup(http_response.content, \"html.parser\")\n http_response.close()\n finally:\n if soup is not None and len(soup) > 0:\n return soup\n else:\n print(\"<<<<<<< URL not accessible >>>>>>\")\n return None\n ","repo_name":"larsen-and-toubro-technology-sevices/SocialMediaIssueTrackingTool","sub_path":"ScrapingTool/Generic/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7661587465","text":"import config\nimport temp_variables\nimport constants\nimport models\n\nfrom utils.tensorize_batch import tensorize_batch\n\nimport os\nimport os.path\nfrom pathlib import Path\nimport torch\n\nfrom datetime import datetime\n\n\nfrom utils import panoptic_fusion, get_datasets\nfrom utils.show_segmentation import apply_semantic_mask_gpu, apply_instance_masks, save_fig\n\n\n\ndevice = torch.device(\n 'cuda') if torch.cuda.is_available() else torch.device('cpu')\nprint(device)\n\ntemp_variables.DEVICE = device\ntorch.cuda.empty_cache()\n\nall_categories, stuff_categories, thing_categories = panoptic_fusion.get_stuff_thing_classes()\n\n\ndef view_masks(model,\n data_loader_val,\n num_classes,\n weights_file,\n result_type,\n folder,\n confidence=0.5):\n\n # Create folder if it doesn't exist\n Path(folder).mkdir(parents=True, exist_ok=True)\n # load weights\n model.load_state_dict(torch.load(weights_file))\n # move model to the right device\n model.to(device)\n for images, anns in data_loader_val:\n images = list(img for img in images)\n images = tensorize_batch(images, device)\n file_names = list(map(lambda ann: ann[\"file_name\"], anns))\n model.eval()\n with torch.no_grad():\n\n outputs = model(images)\n\n if result_type == \"panoptic\":\n 
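# the panoptic branch writes fused results for the whole batch in one call,\n # while the instance/semantic branches below save one figure per image\n 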
panoptic_fusion.get_panoptic_results(\n images, outputs, all_categories, stuff_categories, thing_categories, folder, file_names)\n torch.cuda.empty_cache()\n else: \n for idx, output in enumerate(outputs):\n file_name = file_names[idx]\n if result_type == \"instance\":\n im = apply_instance_masks(images[idx], output['masks'], 0.5)\n\n elif result_type == \"semantic\":\n logits = output[\"semantic_logits\"]\n mask = torch.argmax(logits, dim=0)\n im = apply_semantic_mask_gpu(images[idx], mask, config.NUM_STUFF_CLASSES + config.NUM_THING_CLASSES)\n \n save_fig(im, folder, file_name)\n\n torch.cuda.empty_cache()\n\n\nif __name__ == \"__main__\":\n torch.cuda.empty_cache()\n\n test_dir = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), config.TEST_DIR)\n data_loader_test = get_datasets.get_dataloaders(\n config.BATCH_SIZE, test_dir, is_test_set=True)\n\n confidence = 0.5\n model = models.get_model()\n\n if config.INSTANCE:\n now = datetime.now()\n timestamp = datetime.timestamp(now)\n view_masks(model, data_loader_test, config.NUM_THING_CLASSES,\n config.MODEL_WEIGHTS_FILENAME,\n \"instance\",\n '{}{}_{}_results_instance_{}'.format(constants.INFERENCE_RESULTS,\n config.MODEL, config.BACKBONE, timestamp),\n confidence=0.5)\n\n if config.SEMANTIC:\n now = datetime.now()\n timestamp = datetime.timestamp(now)\n view_masks(model, data_loader_test, config.NUM_STUFF_CLASSES + config.NUM_THING_CLASSES,\n config.MODEL_WEIGHTS_FILENAME,\n \"semantic\",\n '{}{}_{}_results_semantic_{}'.format(constants.INFERENCE_RESULTS,\n config.MODEL, config.BACKBONE, timestamp),\n confidence=0.5)\n\n if config.PANOPTIC:\n now = datetime.now()\n timestamp = datetime.timestamp(now)\n view_masks(model, data_loader_test, config.NUM_STUFF_CLASSES + config.NUM_THING_CLASSES,\n config.MODEL_WEIGHTS_FILENAME,\n \"panoptic\",\n '{}{}_{}_results_panoptic_{}'.format(constants.INFERENCE_RESULTS,\n config.MODEL, config.BACKBONE, timestamp),\n confidence=0.5)\n","repo_name":"juanb09111/semantic_depth","sub_path":"inference_scripts/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"4901244052","text":"import gym\nimport random\n\nenv = gym.make(\"ALE/SpaceInvaders-v5\", render_mode = 'human')\nheight, width, channels = env.observation_space.shape\nactions = env.action_space.n\n\n# Specify the desired render mode and fps\nrender_mode = 'human'\nrender_fps = 30 # Set the desired fps value (e.g., 30)\n\nenv.unwrapped.get_action_meanings()\n\n\n\n'''creating a deep learning model with keras'''\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef build_model(height, width, channels, actions):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Convolution2D(32, (8, 8), strides=(4, 4), activation='relu', input_shape=(height, width, channels)))\n model.add(tf.keras.layers.Convolution2D(64, (4,4), strides = (2,2), activation = 'relu'))\n model.add(tf.keras.layers.Convolution2D(64, (3,3), activation='relu'))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(512, activation = 'relu'))\n model.add(tf.keras.layers.Dense(256, activation = 'relu'))\n model.add(tf.keras.layers.Dense(actions, activation = 'linear'))\n return model\n\nmodel = build_model(height, width, channels, actions)\n\nmodel.summary()\n\n'''build agent with keras-RL'''\n\nfrom rl.agents import DQNAgent\nfrom rl.memory import SequentialMemory\nfrom rl.policy import LinearAnnealedPolicy, 
EpsGreedyQPolicy\n\ndef build_agent(model, actions):\n policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1, value_min=.1, value_test=.2, nb_steps=10000)\n memory = SequentialMemory(limit=1000, window_length = 3)\n dqn = DQNAgent(model=model, memory=memory, policy=policy, enable_dueling_network=True, \n dueling_type='avg', nb_actions=actions, nb_steps_warmup=1000)\n return dqn\n\ndqn = build_agent(model, actions)\ndqn.compile(tf.keras.optimizers.Adam(lr=1e-4)) # compile the agent itself, not the builtin compile()\ndqn.fit(env, nb_steps=10000 , visualize=False, verbose=2) #training steps\n\nscores = dqn.test(env, nb_episodes=10, visualize=True)\nprint(np.mean(scores.history['episode_reward']))\n\n'''reloading agent from memory'''\n\ndqn.save_weights('Savedweights/10k-Fast/dqn_weights.h5f')\ndqn.load_weights('Savedweights/1m/dqn_weights.h5f')","repo_name":"6lvcknight/spaceinvaders","sub_path":"space invaders volume 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"40429922077","text":"import os\nimport io\nimport shutil\nimport datetime\nfrom werkzeug.utils import secure_filename\nfrom fastapi import FastAPI, HTTPException, UploadFile, File, Form\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom faster_whisper import WhisperModel\nfrom translate import Translator\nfrom pydantic import BaseModel\nimport base64\nimport time\n\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# Set the directory for uploading files\ntoday = datetime.date.today()\ndate_str = today.strftime(\"%Y-%m-%d\")\nUPLOAD_FOLDER = f\"./file/{date_str}\"\n\n\n@app.post(\"/transcribe\")\nasync def transcribe(\n model_size: str = Form(...),\n compute_type: str = Form(...),\n device: str = Form(\"cuda\"),\n to_lang: str = Form(None),\n file: UploadFile = File(...),\n beam_size: int = Form(5),\n):\n start_time = time.time() # record the start time\n print(\n f\"content_type: {file.content_type}\\n\"\n f\"to_lang: {to_lang}\\n\"\n f\"model_size: {model_size}\\n\"\n f\"compute_type: {compute_type}\\n\"\n f\"device: {device}\\n\"\n f\"beam_size: {beam_size}\\n\"\n )\n # Check if the file is an audio file\n ALLOWED_FILE_TYPES = {\n \"audio/mpeg\",\n \"video/mp4\",\n \"audio/mp3\",\n \"audio/ogg\",\n \"application/octet-stream\",\n }\n if file.content_type not in ALLOWED_FILE_TYPES:\n raise HTTPException(\n status_code=400,\n detail=\"Invalid file type, please upload an audio or video file\",\n )\n\n # file.content_type.split(\"/\")[0]\n\n # Check if the directory exists and create it if not\n if not os.path.exists(UPLOAD_FOLDER):\n os.makedirs(UPLOAD_FOLDER)\n\n # Build the path to the target file\n filename = secure_filename(file.filename)\n target_path = os.path.join(UPLOAD_FOLDER, filename)\n\n # Save the file to the target path\n with open(target_path, \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n\n # model_size = tiny, small,base, medium, large-v2,\n # compute_type= \"int8\", \"int8_float16\", \"float16\", \"None\"\n\n model = WhisperModel(model_size, device=device, compute_type=compute_type)\n\n # Transcribe the file using faster-whisper\n segments, info = model.transcribe(target_path, beam_size=beam_size)\n\n print(\n \"Detected language '%s' with probability %f\"\n % (info.language, info.language_probability)\n )\n\n translator = Translator(from_lang=info.language, to_lang=to_lang)\n # Translate 
the segments text\n if to_lang is not None:\n segments_list = [\n {\n \"start\": segment.start,\n \"end\": segment.end,\n \"text\": translator.translate(segment.text),\n }\n for segment in segments\n ]\n else:\n segments_list = [\n {\"start\": segment.start, \"end\": segment.end, \"text\": segment.text}\n for segment in segments\n ]\n\n text = \",\".join([segment[\"text\"] for segment in segments_list])\n\n print(\"text\", text)\n print(\"segments_list\", segments_list)\n\n end_time = time.time() # record the end time\n processing_time = end_time - start_time # compute the processing time\n\n print(\"processing_time\", processing_time)\n\n return {\n \"text\": text,\n \"segments\": segments_list,\n \"language\": info.language,\n \"language_probability\": info.language_probability,\n \"processing_time\": processing_time, # return the processing time\n }\n\n\nclass base64DataTranscribe_Request(BaseModel):\n file: str\n model_size: str\n compute_type: str\n device: str = \"cuda\"\n to_lang: str = None\n beam_size: int = 5\n\n\n@app.post(\"/base64DataTranscribe\")\nasync def base64DataTranscribe(req: base64DataTranscribe_Request):\n start_time = time.time() # record the start time\n print(\n f\"base64Str: {req.file[:50]}....\\n\",\n f\"to_lang: {req.to_lang}\\n\"\n f\"model_size: {req.model_size}\\n\"\n f\"compute_type: {req.compute_type}\\n\"\n f\"device: {req.device}\",\n )\n\n data = base64.b64decode(req.file)\n binary_data = io.BytesIO(data)\n\n model = WhisperModel(\n req.model_size, device=req.device, compute_type=req.compute_type\n )\n\n segments, info = model.transcribe(\n binary_data,\n beam_size=req.beam_size,\n )\n\n print(\n \"Detected language '%s' with probability %f\"\n % (info.language, info.language_probability)\n )\n\n translator = Translator(from_lang=info.language, to_lang=req.to_lang)\n if req.to_lang is not None:\n segments_list = [\n {\n \"start\": segment.start,\n \"end\": segment.end,\n \"text\": translator.translate(segment.text),\n }\n for segment in segments\n ]\n else:\n segments_list = [\n {\"start\": segment.start, \"end\": segment.end, \"text\": segment.text}\n for segment in segments\n ]\n\n text = \",\".join([segment[\"text\"] for segment in segments_list])\n print(\"text\", text)\n print(\"segments_list\", segments_list)\n\n end_time = time.time() # record the end time\n processing_time = end_time - start_time # compute the processing time\n\n print(\"processing_time\", processing_time)\n\n return {\n \"text\": text,\n \"segments\": segments_list,\n \"language\": info.language,\n \"language_probability\": info.language_probability,\n \"processing_time\": processing_time, # return the processing time\n }\n\n\nclass Base64FastTranscribe_Request(BaseModel):\n file: str\n\n\n@app.post(\"/base64FastTranscribe\")\nasync def base64FastTranscribe(req: Base64FastTranscribe_Request):\n start_time = time.time() # record the start time\n data = base64.b64decode(req.file)\n binary_data = io.BytesIO(data)\n\n model = WhisperModel(\"small\", device=\"cuda\", compute_type=\"int8\")\n\n segments, info = model.transcribe(\n binary_data, beam_size=1, without_timestamps=True, temperature=0, language=\"zh\"\n )\n\n text = \"\"\n for item in segments:\n text += item.text # use the segment's text field rather than a positional index\n\n end_time = time.time() # record the end time\n processing_time = end_time - start_time # compute the processing time\n\n return {\n \"text\": text,\n \"processing_time\": processing_time, # return the processing time\n }\n\n\nclass TranslationRequest(BaseModel):\n text: str\n to_lang: str\n from_lang: str\n\n\n@app.post(\"/translate\")\nasync def translate_text(req: TranslationRequest):\n # Translate the text to the specified language\n translator = Translator(from_lang=req.from_lang, to_lang=req.to_lang)\n 
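# note: a new Translator instance is built for every request here; each\n # translate() call presumably queries the translate package's default online provider\n 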
translation = translator.translate(req.text)\n\n return {\"text\": translation}\n","repo_name":"frankwongWO/whisper-fastapi","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"18973658935","text":"import numpy as np\n\nfrom app.neuron_layers.neuron_layer import NeuronLayer\nfrom app.neuron import Neuron\n\n\nclass OutputNeuronLayer(NeuronLayer):\n def __init__(self, num_neurons: int, inputs_layer_size: int):\n super().__init__(num_neurons, inputs_layer_size)\n self._exp_total_inputs = list()\n self._sum_exp_input = None\n\n def _softmax(self, inputs: list):\n # reset the cached exponentials so repeated forward passes do not\n # accumulate stale values from earlier calls\n self._exp_total_inputs = list()\n outputs = list()\n for neuron in self._neurons:\n self._exp_total_inputs.append(np.exp(neuron.calculate_total_net_input(inputs)))\n\n self._sum_exp_input = sum(self._exp_total_inputs)\n for exp_input, neuron in zip(self._exp_total_inputs, self._neurons):\n neuron.output = exp_input / self._sum_exp_input\n outputs.append(neuron.output)\n\n return outputs\n\n def update_weights(self, targets: list, hidden_layer_outputs: list, learning_rate: float):\n error_derivatives_to_input = list()\n for neuron, target, input_exp in zip(self._neurons, targets, self._exp_total_inputs):\n error_derivative_to_output, output_derivative_to_input = self._calculate_derivatives(target,\n neuron,\n input_exp,\n len(targets))\n error_derivative_to_input = error_derivative_to_output * output_derivative_to_input\n error_derivatives_to_input.append(error_derivative_to_input)\n for i in range(len(hidden_layer_outputs)):\n delta_weight = error_derivative_to_input * hidden_layer_outputs[i].output\n neuron.set_weight(i, neuron.get_weight(i) - (learning_rate * delta_weight))\n\n return error_derivatives_to_input\n\n def _calculate_derivatives(self, target: int, neuron: Neuron, input_exp: float, layer_size: int):\n error_derivative_to_output = - (target / neuron.output + (1 - target) / (1 - neuron.output)) / layer_size\n output_derivative_to_input = (input_exp * (self._sum_exp_input - input_exp)) / (self._sum_exp_input ** 2)\n\n return error_derivative_to_output, output_derivative_to_input\n\n def feed_forward(self, inputs: list):\n outputs = self._softmax(inputs)\n return outputs\n","repo_name":"alekseyfa/CNN","sub_path":"app/neuron_layers/output_layer.py","file_name":"output_layer.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"75000122504","text":"\"\"\"\n    discord-bot-2 backend\n\n    Router receiver and sender threads\n\n\"\"\"\nimport asyncio\nimport json\nimport logging\nimport secrets\nfrom hashlib import md5\n\nimport cachetools\nimport zmq\nimport zmq.asyncio\nfrom redis import Redis, RedisError, ConnectionError\n\nfrom .config import CONFIG\nfrom .timing import Timer\n\n\nclass Router:\n \"\"\"\n Handle zmq interface\n \"\"\"\n\n def __init__(self) -> None:\n self.__db = Redis(decode_responses=True)\n self.in_queue = \"job_queue\"\n self.out_queue = \"response_queue\"\n self.job_cache: cachetools.TTLCache = cachetools.TTLCache(1000, 60)\n\n ctx = zmq.asyncio.Context()\n self.__sck = ctx.socket(zmq.ROUTER)\n address = CONFIG.get(\"startup\", \"zmq_address\")\n self.__sck.bind(address)\n logging.info(\"Socket bound at %s\", address)\n self.__sck.setsockopt(zmq.RCVTIMEO, 500)\n self.__sck.setsockopt(zmq.LINGER, 1000)\n\n self.is_shutting_down = False\n self.client = None\n\n async def recv(self) -> None:\n \"\"\"\n 
Poll for received commands until shutdown\n \"\"\"\n while not self.is_shutting_down:\n try:\n msg = await self.__sck.recv_multipart()\n await self.put_in_queue(msg)\n except zmq.Again:\n pass\n except Exception as exc:\n logging.exception(exc)\n self.__sck.send_json({\"code\": 1})\n finally:\n await asyncio.sleep(0.01)\n\n async def put_in_queue(self, msg: list, max_attempts: int = 5) -> None:\n \"\"\"\n Puts a message in the input queue\n \"\"\"\n for attempt in range(max_attempts):\n try:\n job_id = md5(secrets.token_bytes(8)).hexdigest()\n job = [job_id, *[x.hex() for x in msg]]\n self.__db.rpush(self.in_queue, json.dumps(job))\n self.__db.expire(self.in_queue, 60)\n self.job_cache[job_id] = None # store ID as dict for faster lookup\n Timer.start(job_id)\n logging.info(\"[router] Queued job: %s\", job_id)\n return\n except (RedisError, ConnectionError):\n logging.exception(\n \"An error occurred in Redis, retrying (attempt %s of %s)\",\n attempt + 1,\n max_attempts,\n )\n await asyncio.sleep(0.2)\n\n raise Exception(f\"Redis connection failure, retried {max_attempts} times\")\n\n async def send(self) -> None:\n \"\"\"\n Check for completed jobs and send until shutdown\n \"\"\"\n while not self.is_shutting_down:\n try:\n work = self.get_from_queue()\n if work:\n job_id, msg = work\n await self.__sck.send_multipart(msg)\n logging.info(\"[router] Sent response, job took %sms\", Timer.stop(job_id))\n except Exception as exc:\n logging.exception(exc)\n self.__sck.send_json({\"code\": 1})\n finally:\n await asyncio.sleep(0.01)\n\n def get_from_queue(self) -> tuple[str, list] | None:\n \"\"\"\n Get job ID and zmq message from Redis\n \"\"\"\n msg = self.__db.lpop(self.out_queue)\n if msg:\n data = json.loads(msg)\n job_id = data[0]\n if job_id in self.job_cache:\n response = [bytes.fromhex(x) for x in data[1:]]\n del self.job_cache[job_id]\n return job_id, response\n logging.error(\"Job ID failed to validate. 
Fake job response?\")\n return None\n","repo_name":"joeggg/discord-bot-backend","sub_path":"bot_worker/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"43308022934","text":"import threading\n\nimport time\nfrom gi.repository import Gtk\n\nfrom ..ui.server.list_row import ListServerData\n\n\nclass Servers:\n\n def __init__(self, win, home, load, page, prev, back):\n self.win = win\n self.home = home\n self.load = load\n self.page = page\n self.prev = prev\n self.back = back\n\n self.load.call_when_server_created = self.server_created\n self.load.call_when_server_deleted = self.server_deleted\n self.load.call_when_server_updated = self.server_updated\n\n self.servers = Gtk.ListBox()\n self.servers.set_hexpand(True)\n self.servers.set_vexpand(True)\n self.servers.set_border_width(30)\n\n self.last_servers_update = time.time()\n\n def setup(self, grid, servers):\n\n search = self._search()\n grid.attach(search, 0, 2, 1, 1)\n\n servers_window = Gtk.ScrolledWindow()\n servers_window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n servers_window.set_hexpand(True)\n servers_window.set_vexpand(True)\n\n self.servers.connect('row-activated', lambda widget, row: row.select_server())\n\n servers_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n servers_box.pack_start(self.servers, True, True, 0)\n servers_window.add(servers_box)\n grid.attach(servers_window, 0, 3, 1, 1)\n\n thread = threading.Thread(target=servers, args=(self.server_created,))\n thread.daemon = True\n thread.start()\n\n def _update_servers(self, button):\n if time.time() - self.last_servers_update > 2:\n\n for server in self.servers.get_children():\n thread = threading.Thread(target=self._update_server, args=(server,))\n thread.daemon = True\n thread.start()\n\n self.last_servers_update = time.time()\n\n def _search(self):\n searchbar = Gtk.SearchBar()\n searchentry = Gtk.SearchEntry()\n searchentry.set_size_request(500,-1)\n searchentry.set_hexpand(True)\n searchbar.set_hexpand(True)\n\n searchentry.connect(\"search-changed\", self._on_search_changed)\n searchbar.connect_entry(searchentry)\n searchbar.add(searchentry)\n searchbar.set_search_mode(True)\n\n searchbar.set_valign(Gtk.Align.CENTER)\n searchbar.set_halign(Gtk.Align.CENTER)\n return searchbar\n\n def _on_search_changed(self, searchentry):\n data = searchentry.get_text()\n\n def filter_func(row, data, notify_destroy):\n # compare strings by value; 'is' tests object identity and is\n # unreliable for string literals\n if data == '':\n return True\n if data in row.game_server.hostname.lower():\n return True\n if data in row.game_server.host.lower():\n return True\n if row.game_server.hostname == '-':\n return False\n if row.game_server.hostname == 'Error':\n return False\n if data in row.game_server.map.lower():\n return True\n return False\n\n self.servers.set_filter_func(filter_func, data, False)\n\n def _update_server(self, server):\n server.load.update_server_data(server.game_server)\n\n def server_created(self, server):\n self.servers.add(ListServerData(server, self.win, self.load, self.page, self.prev, self.back))\n\n def sort_func(row_1, row_2, data, notify_destroy):\n return row_1.game_server.players < row_2.game_server.players\n\n self.servers.set_sort_func(sort_func, None, False)\n self.servers.show_all()\n\n def server_deleted(self, delete_server):\n for server in self.servers:\n if server.game_server == delete_server:\n self.servers.remove(server)\n\n def server_updated(self, update_server):\n\n for 
server in self.servers:\n if server.game_server == update_server:\n server.update()\n\n def sort_func(row_1, row_2, data, notify_destroy):\n return row_1.game_server.players < row_2.game_server.players\n\n self.servers.set_sort_func(sort_func, None, False)","repo_name":"Goasd/Monni","sub_path":"monni/ui/servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27858809635","text":"import pygame\nfrom random import choice\nfrom math import sqrt\n\n\nclass Direction:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n def __repr__(self):\n return f'Direction({self.x}, {self.y})'\n\n def __str__(self):\n return f'{self.x:3}, {self.y:3}'\n\n def normalized(self):\n magnitude = sqrt(self.x ** 2 + self.y ** 2) + 0.0001 # no division by zero\n return Direction(self.x / magnitude, self.y / magnitude)\n\n def dot_product(self, other):\n return self.x * other.x + self.y * other.y\n\n def reverse(self):\n return Direction(-self.x, -self.y)\n\n\ndef closest_direction(displacement, directions):\n norm = displacement.normalized()\n return max(directions, key=lambda x: x.dot_product(norm))\n\n\nclass Puzzle:\n TILE_SIZE = 50\n SPACE_COLOR = 'black'\n TEXT_COLOR = 'black'\n TEXT_SIZE = 40\n SOLVED_COLOR = (100, 255, 100)\n BORDER_COLOR = 'black'\n BORDER_WIDTH = 5\n MOVES = {'left': Direction(-1, 0),\n 'right': Direction(1, 0),\n 'down': Direction(0, 1),\n 'up': Direction(0, -1)}\n SHUFFLE_COUNT = 50\n LEFT_CLICK = 1\n\n def __init__(self, width=4, height=4):\n self.width = abs(int(width))\n self.height = abs(int(height))\n # create board\n self.board = []\n number = 1\n for x in range(self.width):\n self.board.append([])\n for y in range(self.height):\n if number < self.width * self.height:\n self.board[x].append(str(number))\n else:\n self.board[x].append(' ')\n number += 1\n # keep track of space so it doesn't need to be found\n self.space = (self.width - 1, self.height - 1)\n # save solution\n self.solution = str(self)\n # save history\n self.history = []\n\n def __eq__(self, other):\n for x in range(self.width):\n for y in range(self.height):\n if self.board[x][y] != other.board[x][y]:\n return False\n return True\n\n def __repr__(self):\n return f'Puzzle({self.width}, {self.height})'\n\n def __str__(self):\n result = ''\n for x in range(self.width):\n for y in range(self.height):\n result += f'\\t{self.board[x][y]}'\n result += '\\n'\n return result\n\n def move(self, direction):\n \"\"\" Try making a move in the given direction.\n Return whether the move changed the board\"\"\"\n x, y = self.space\n # left\n if direction == self.MOVES['left']:\n # move space up in row\n if y < self.height - 1:\n self.board[x][y] = self.board[x][y + 1]\n self.board[x][y + 1] = ' '\n # update space\n self.space = (x, y + 1)\n # update history\n self.history.append(self.MOVES['left'])\n return True\n # right\n if direction == self.MOVES['right']:\n # move space down in row\n if y > 0:\n self.board[x][y] = self.board[x][y - 1]\n self.board[x][y - 1] = ' '\n # update space\n self.space = (x, y - 1)\n # update history\n self.history.append(self.MOVES['right'])\n return True\n # down\n if direction == self.MOVES['down']:\n # move space up in column\n if x > 0:\n self.board[x][y] = self.board[x - 1][y]\n self.board[x - 1][y] = ' '\n # update space\n self.space = (x - 1, y)\n # update history\n 
self.history.append(self.MOVES['down'])\n return True\n # up\n if direction == self.MOVES['up']:\n # move space down in column\n if x < self.width - 1:\n self.board[x][y] = self.board[x + 1][y]\n self.board[x + 1][y] = ' '\n # update space\n self.space = (x + 1, y)\n # update history\n self.history.append(self.MOVES['up'])\n return True\n return False\n\n def shuffle(self, move_count):\n \"\"\" Make move_count moves, making sure each move changes the board \"\"\"\n moves_made = 0\n while moves_made < move_count:\n moves_made += int(self.move(choice(list(self.MOVES.values()))))\n\n def is_solved(self):\n return str(self) == self.solution\n\n def prune_history(self):\n to_remove = []\n index = 0\n while index < len(self.history) - 1:\n # remove consecutive moves that are reverses\n if self.history[index] == self.history[index + 1].reverse():\n to_remove.extend([index, index + 1])\n index += 2\n else:\n index += 1\n for index in reversed(to_remove):\n self.history.pop(index)\n\n def solve(self):\n # shorten history where possible\n self.prune_history()\n # start from the most recent move\n for move in reversed(self.history):\n reversal = move.reverse()\n # make reverse move\n self.move(reversal)\n\n def show_history(self):\n for move in self.history:\n print(move)\n\n def draw_square(self, screen, x, y, color):\n # space\n if (x, y) == self.space:\n color = self.SPACE_COLOR\n if self.is_solved():\n color = self.SOLVED_COLOR\n # background first, then the border on top; drawing the filled\n # rectangle last would paint over the border\n pygame.draw.rect(screen, color, (y * self.TILE_SIZE, x * self.TILE_SIZE, self.TILE_SIZE, self.TILE_SIZE), 0)\n # border\n pygame.draw.rect(screen, self.BORDER_COLOR,\n (y * self.TILE_SIZE, x * self.TILE_SIZE, self.TILE_SIZE, self.TILE_SIZE), self.BORDER_WIDTH)\n # text\n myfont = pygame.font.SysFont('Times New Roman', self.TEXT_SIZE)\n textsurface = myfont.render(self.board[x][y], False, self.TEXT_COLOR)\n margin = (self.TILE_SIZE - self.TEXT_SIZE) // 2\n screen.blit(textsurface, (margin + self.TILE_SIZE * y, margin + self.TILE_SIZE * x))\n\n def draw(self, screen):\n for y in range(self.height):\n for x in range(self.width):\n number = self.board[x][y]\n # ignore space\n color = 'black'\n if number != ' ':\n color = tuple(\n [min(255, 255 * (int(number) + 5) // (self.width * self.height + 5))] * 3) # shade of grey\n self.draw_square(screen, x, y, color)\n\n def show(self):\n \"\"\" Displays a puzzle graphically. 
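Close the window or press q to quit. 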
\"\"\"\n pygame.init()\n pygame.display.set_caption('Show 15 puzzle position')\n # img = pygame.image.load('icon.png')\n # pygame.display.set_icon(img)\n pygame.font.init()\n screen = pygame.display.set_mode((self.TILE_SIZE * self.width, self.TILE_SIZE * self.height))\n self.draw(screen)\n pygame.display.update()\n running = True\n while running:\n keys = pygame.key.get_pressed()\n redraw_needed = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n # quit\n running = False\n\n def play(self):\n pygame.init()\n pygame.display.set_caption('15 puzzle')\n pygame.font.init()\n pygame.mixer.init()\n # move_sound = pygame.mixer.Sound('move.wav')\n screen = pygame.display.set_mode((self.TILE_SIZE * self.height, self.TILE_SIZE * self.width))\n # img = pygame.image.load('icon.png')\n # pygame.display.set_icon(img)\n running = True\n initial_position = True\n while running:\n keys = pygame.key.get_pressed()\n redraw_needed = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n # mouse handling\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == self.LEFT_CLICK:\n # start of displacement\n pygame.mouse.get_rel()\n if event.type == pygame.MOUSEBUTTONUP:\n if event.button == self.LEFT_CLICK:\n # end of displacement\n movement = pygame.mouse.get_rel()\n displacement = Direction(*movement)\n self.move(closest_direction(displacement, list(self.MOVES.values())))\n redraw_needed = True\n # key handling\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n # quit\n running = False\n elif event.key == pygame.K_SPACE:\n # screenshot\n pygame.image.save(screen, \"capture.png\")\n elif event.key == pygame.K_h:\n # history\n self.show_history()\n elif event.key == pygame.K_r:\n # shuffle\n self.shuffle(self.SHUFFLE_COUNT)\n redraw_needed = True\n elif event.key == pygame.K_s:\n # solve\n self.solve()\n redraw_needed = True\n # movement\n elif event.key == pygame.K_UP:\n redraw_needed = self.move(self.MOVES['up'])\n elif event.key == pygame.K_DOWN:\n redraw_needed = self.move(self.MOVES['down'])\n elif event.key == pygame.K_LEFT:\n redraw_needed = self.move(self.MOVES['left'])\n elif event.key == pygame.K_RIGHT:\n redraw_needed = self.move(self.MOVES['right'])\n if initial_position or redraw_needed:\n # draw puzzle\n self.draw(screen)\n pygame.display.update()\n # play sound\n # move_sound.play()\n initial_position = False\n pygame.quit()\n\n\nif __name__ == '__main__':\n p = Puzzle(4,4)\n p.play()\n","repo_name":"Lucian1611/portfolio_projects_1","sub_path":"lucian_bidica/python_gui/game_of_15.py","file_name":"game_of_15.py","file_ext":"py","file_size_in_byte":10605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34409556903","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.optim import AdamW\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.nn import CrossEntropyLoss\nimport random as rd\nimport json\nfrom transformers import AutoModelForSeq2SeqLM, AutoTokenizer\nfrom argparse import ArgumentParser\nfrom tqdm import tqdm\nimport math\nimport time\nfrom datetime import datetime\nimport os\nfrom os.path import join, exists\nimport transformers\nimport pickle\nimport sys\nimport numpy as np\nimport logging\nimport warnings\n\n\n# In[2]:\n\n\ndevice = torch.device('cuda') if 
torch.cuda.is_available() else torch.device('cpu')\n\n\nparser = ArgumentParser(\"T5 persona\")\nparser.add_argument(\"--dumped_token\",type=str, default='/kaggle/input/t5-mengzi-new/couplet_json_new/')\nparser.add_argument(\"--batch_size\", type=int, default=4)\nparser.add_argument(\"--kaggle\",type=str, default='kaggle')\nparser.add_argument('--log_path', default='./train.log', type=str)\nparser.add_argument('--epochs', default=100, type=int, required=False, help='maximum number of training epochs')\nparser.add_argument('--lr', default=4e-5, type=float, required=False, help='learning rate')\nparser.add_argument('--eps', default=1.0e-09, type=float, required=False, help='decay rate')\nparser.add_argument('--warmup_steps', default=400, type=int, required=False, help='warmup steps ')\nparser.add_argument('--gradient_accumulation_steps', default=4, type=int, required=False, help='gradient accumulation steps')\nparser.add_argument('--max_grad_norm', default=2.0, type=float, required=False)\nparser.add_argument('--save_model_path', default='./model', type=str, required=False,\n                    help='model output path')\nparser.add_argument('--pretrained_model', default='', type=str, required=False,\n                    help='path to the pretrained model')\nparser.add_argument('--optimizer_state', default=None, type=str, required=False,\n                    help='path to the saved optimizer state')\nparser.add_argument('--log_step', default=100, type=int, required=False, help='report the loss every this many steps')\nparser.add_argument('--num_workers', type=int, default=2, help=\"number of worker threads used by the dataloader\")\n\nargs = parser.parse_args([\"--kaggle\", 'kaggle'])\n\ndef prepare_data_batch(batch):\n    response_input_ids = batch['response']['input_ids']\n    response_input_ids[response_input_ids == 0] = -100\n    return batch['query']['input_ids'], batch['query']['attention_mask'], response_input_ids\n\n\ndef calculate_acc(logit, labels, ignore_index=-100):\n    logit = logit[..., :, :].contiguous().view(-1, logit.size(-1))\n    labels = labels[..., :].contiguous().view(-1)\n    _, logit = logit.max(dim=-1)\n    non_pad_mask = labels.ne(ignore_index)\n    n_correct = logit.eq(labels).masked_select(non_pad_mask).sum().item()\n    n_word = non_pad_mask.sum().item()\n    return n_correct, n_word\n\ndef get_logger(filename, verbosity=1, name=None):\n    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}\n    formatter = logging.Formatter(\n        \"[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s\"\n    )\n    logger = logging.getLogger(name)\n    logger.setLevel(level_dict[verbosity])\n\n    fh = logging.FileHandler(filename, \"w\")\n    fh.setFormatter(formatter)\n    logger.addHandler(fh)\n\n    sh = logging.StreamHandler()\n    sh.setFormatter(formatter)\n    logger.addHandler(sh)\n\n    return logger\n\nclass ConvAI2Dataset(torch.utils.data.Dataset):\n    def __init__(self, queries, labels):\n        self.queries = queries\n        self.labels = labels\n\n    def __getitem__(self, idx):\n        query = {\n            key: torch.tensor(val[idx])\n            for key, val in self.queries.items()\n        }\n        response = {\n            key: torch.tensor(val[idx])\n            for key, val in self.labels.items()\n        }\n        return { 'query': query, 'response': response }\n\n    def __len__(self):\n        return len(self.labels['input_ids'])\n\ndef load_dataset(logger,args):\n    logger.info(f\"Load tokenized train & val dataset from {args.dumped_token}.\")\n    path = args.dumped_token\n\n    with open(path + 'train_query.json') as train_query, open(path + 'test_query.json') as val_query:\n        print(\"Load train_query\")\n        tmp = train_query.readline()\n        train_query_tokenized = json.loads(tmp)\n        print(\"Load val_query\")\n        tmp = val_query.readline()\n        val_query_tokenized = json.loads(tmp)\n\n    with open(path + 'train_response.json') as train_response, open(path + 'test_response.json') as val_response:\n        print(\"Load train_response\")\n        tmp = train_response.readline()\n        train_response_tokenized = json.loads(tmp)\n        print(\"Load val_response\")\n        tmp = val_response.readline()\n        val_response_tokenized = json.loads(tmp)\n\n    train_dataset = ConvAI2Dataset(train_query_tokenized,\n                                   train_response_tokenized)\n    train_loader = DataLoader(train_dataset,\n                              batch_size=args.batch_size,\n                              shuffle=True,\n                              drop_last=True,\n                              num_workers=args.num_workers\n                              )\n\n    val_dataset = ConvAI2Dataset(val_query_tokenized,\n                                 val_response_tokenized)\n    val_loader = DataLoader(val_dataset,\n                            batch_size=args.batch_size,\n                            shuffle=True,\n                            drop_last=True)\n    return train_loader,val_loader\n\ndef train_epoch(model, train_dataloader, optimizer, scheduler, logger,\n                epoch, args):\n    model.train()\n    epoch_start_time = datetime.now()\n    total_loss = 0\n\n    # epoch_correct_num: number of words the model predicted correctly in this epoch\n    # epoch_total_num: total number of words predicted in this epoch\n    epoch_correct_num, epoch_total_num = 0, 0\n\n    for batch_idx, batch in enumerate(train_dataloader):\n        # catch cuda out of memory exceptions\n        try:\n            input_ids, attention_mask, labels = prepare_data_batch(batch)\n            input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)\n\n            outputs = model(input_ids=input_ids,\n                            attention_mask=attention_mask,\n                            labels=labels)\n            logits = outputs.logits\n            loss = outputs.loss\n\n            # count the correctly predicted tokens and the total tokens in this batch\n            batch_correct_num, batch_total_num = calculate_acc(logits, labels, ignore_index=-100)\n            # accumulate the correct and total token counts for this epoch\n            epoch_correct_num += batch_correct_num\n            epoch_total_num += batch_total_num\n            # compute the accuracy of this batch\n            batch_acc = batch_correct_num / batch_total_num\n\n            total_loss += loss.detach().item()\n\n            if args.gradient_accumulation_steps > 1:\n                loss = loss / args.gradient_accumulation_steps\n\n            loss.backward()\n            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n            if (batch_idx + 1) % args.gradient_accumulation_steps == 0:\n                optimizer.step()\n                optimizer.zero_grad()\n                scheduler.step()\n\n            if (batch_idx + 1) % args.log_step == 0:\n                logger.info(\n                    \"batch {} of epoch {}, loss {}, batch_acc {}, lr {}\".format(\n                        batch_idx + 1, epoch + 1, loss.detach().item() * args.gradient_accumulation_steps, batch_acc, scheduler.get_lr()))\n\n            del input_ids, outputs\n\n        except RuntimeError as exception:\n            if \"out of memory\" in str(exception):\n                logger.info(\"WARNING: ran out of memory\")\n                if hasattr(torch.cuda, 'empty_cache'):\n                    torch.cuda.empty_cache()\n            else:\n                logger.info(str(exception))\n                raise exception\n\n    # record the mean loss and accuracy of this epoch\n    epoch_mean_loss = total_loss / len(train_dataloader)\n    epoch_mean_acc = epoch_correct_num / epoch_total_num\n    logger.info(\n        \"epoch {}: loss {}, predict_acc {}\".format(epoch + 1, epoch_mean_loss, epoch_mean_acc))\n\n    logger.info('epoch {} finished'.format(epoch + 1))\n    epoch_finish_time = datetime.now()\n    logger.info('time for one epoch: {}'.format(epoch_finish_time - epoch_start_time))\n\n    return epoch_mean_loss\n\ndef validate_epoch(model, validate_dataloader, logger, epoch, args):\n    logger.info(\"start validating\")\n    model.eval()\n    epoch_start_time = datetime.now()\n    total_loss = 0\n    # catch cuda out of memory exceptions\n    try:\n        with torch.no_grad():\n            for batch_idx, batch in enumerate(validate_dataloader):\n                input_ids, attention_mask, labels = prepare_data_batch(batch)\n                input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)\n                outputs = model(input_ids=input_ids,\n                                attention_mask=attention_mask,\n                                labels=labels)\n                logits = outputs.logits\n                loss = outputs.loss\n\n                total_loss += loss.detach().item()\n                del input_ids, outputs\n\n        # record the mean loss of this epoch\n        epoch_mean_loss = total_loss / len(validate_dataloader)\n        logger.info(\n            \"validate epoch {}: loss {}\".format(epoch+1, epoch_mean_loss))\n        epoch_finish_time = datetime.now()\n        logger.info('time for validating one epoch: {}'.format(epoch_finish_time - epoch_start_time))\n        return epoch_mean_loss\n    except RuntimeError as exception:\n        if \"out of memory\" in str(exception):\n            logger.info(\"WARNING: ran out of memory\")\n            if hasattr(torch.cuda, 'empty_cache'):\n                torch.cuda.empty_cache()\n        else:\n            logger.info(str(exception))\n            raise exception\n\ndef train(model, logger, train_dataloader, validate_dataloader, args):\n\n    t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.epochs\n    logger.info(f't_total:{t_total}')\n\n    param_optimizer = list(model.named_parameters())\n    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n    optimizer_grouped_parameters = [\n        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},\n        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n    ]\n    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=args.eps)\n    if args.optimizer_state is not None:\n        optimizer.load_state_dict(torch.load(args.optimizer_state))\n\n    scheduler = transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)\n    logger.info('starting training')\n\n    # track the training and validation loss of every epoch\n    train_losses, validate_losses = [], []\n    # track the minimum validation loss\n    best_val_loss = 10000\n    # start training\n    for epoch in range(args.epochs):\n        # ========== train ========== #\n        train_loss = train_epoch(\n            model=model, train_dataloader=train_dataloader,\n            optimizer=optimizer, scheduler=scheduler,\n            logger=logger, epoch=epoch, args=args)\n        train_losses.append(train_loss)\n        # ========== validate ========== #\n        validate_loss = validate_epoch(\n            model=model, validate_dataloader=validate_dataloader,\n            logger=logger, epoch=epoch, args=args)\n        validate_losses.append(validate_loss)\n\n        # save the model with the lowest perplexity so far; a lower perplexity\n        # does not necessarily mean better generations\n        if validate_loss < best_val_loss:\n            best_val_loss = validate_loss\n            logger.info('saving current best model for epoch {}'.format(epoch + 1))\n            model_path = join(args.save_model_path, 'best_model_in_epoch{}'.format(epoch + 1))\n            if not os.path.exists(model_path):\n                os.mkdir(model_path)\n            model_to_save = model.module if hasattr(model, 'module') else model\n            if epoch % 12 == 0:\n                model_to_save.save_pretrained(model_path)\n                torch.save(optimizer.state_dict(), './optimizer')\n\n    logger.info('training finished')\n    logger.info(\"train_losses:{}\".format(train_losses))\n    logger.info(\"validate_losses:{}\".format(validate_losses))\ndef main():\n    logger = get_logger(args.log_path)\n    logger.info('start training!')\n    logger.info('using device:{}'.format(device))\n\n    # create the model output directory\n    if not os.path.exists(args.save_model_path):\n        os.mkdir(args.save_model_path)\n\n    # create the model\n    if args.pretrained_model:  # load a pretrained model\n        model = AutoModelForSeq2SeqLM.from_pretrained(args.pretrained_model)\n    else:  # initialize the model\n        model = AutoModelForSeq2SeqLM.from_pretrained(\"Langboat/mengzi-t5-base\")\n    model = model.to(device)\n\n    # count the model parameters\n    num_parameters = 0\n    parameters = model.parameters()\n    for parameter in parameters:\n        num_parameters += parameter.numel()\n    logger.info('number of model parameters: {}'.format(num_parameters))\n\n    # log the argument settings\n    logger.info(\"args:{}\".format(args))\n\n    # load the training and validation sets\n    # ========= Loading Dataset ========= #\n    train_loader, val_loader = load_dataset(logger, args)\n    train(model, logger, train_loader, val_loader, args)\n\n\n# In[3]:\n\n\nmain()\n\n","repo_name":"coolwx/aiproject","sub_path":"finetune-mengzi-couplet.py","file_name":"finetune-mengzi-couplet.py","file_ext":"py","file_size_in_byte":14889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27156004099","text":"# Standard Library\nimport datetime\nimport importlib\nimport os\nimport sys\nfrom typing import Any, List, Mapping, Optional, Union\n\n# Third Party\nfrom loguru import logger\n\n# Local\nimport bel.belspec.crud\nimport bel.core.settings as settings\nimport bel.terms.terms\nfrom bel.lang.ast import BELAst\nfrom bel.schemas.bel import AssertionStr, Key\n\nsys.path.append(\"../\")\n\n\"\"\"BEL object\n\nThis manages the BEL AST object which is responsible for parsing, validation, (de)canonicalization and orthologization.\n\nThe BelEntity Object handles all of the NSArg entity manipulation (canonicalization, normalization, orthologization, etc).\n\"\"\"\n\n\nclass BEL(object):\n    \"\"\"BEL Language object\n\n    This object handles BEL statement/triple processing, parsing, (de)canonicalization,\n    orthologization and other purposes.\n    \"\"\"\n\n    def __init__(self, assertion: AssertionStr = None, version: str = \"latest\") -> None:\n        \"\"\"Initialize BEL object used for validating/processing/etc BEL statements\n\n        Args:\n            assertion: BEL Assertion\n            version: BEL Version - defaults to settings.BEL_DEFAULT_VERSION or latest version\n        \"\"\"\n\n        self.assertion = assertion\n\n        self.clean_assertion()\n\n        self.version = bel.belspec.crud.check_version(version)\n\n        # Validation error/warning messages\n        # List[Tuple[str, str]], e.g. 
[('ERROR', 'this is an error msg'), ('WARNING', 'this is a warning'), ]\n        self.validation_messages = []\n\n        self.ast: Optional[BELAst] = None\n\n        if self.assertion:\n            self.ast = BELAst(assertion=assertion, version=version)\n\n    def clean_assertion(self):\n        \"\"\"Various tasks to clean the assertion component strings\"\"\"\n\n        # Remove smart quotes\n        if self.assertion:\n            self.assertion.subject = (\n                self.assertion.subject.replace(\"“\", '\"').replace(\"”\", '\"').strip()\n            )\n            self.assertion.relation = (\n                self.assertion.relation.replace(\"“\", '\"').replace(\"”\", '\"').strip()\n            )\n            self.assertion.object = (\n                self.assertion.object.replace(\"“\", '\"').replace(\"”\", '\"').strip()\n            )\n            self.assertion.entire = (\n                self.assertion.entire.replace(\"“\", '\"').replace(\"”\", '\"').strip()\n            )\n\n    def parse(self, assertion: AssertionStr = None) -> \"BEL\":\n        \"\"\"Parse BEL Assertion string\"\"\"\n\n        # Add or override assertion string object in parse method\n        if assertion is not None:\n            self.assertion = assertion\n            self.clean_assertion()\n\n        # parse self.assertion so an assertion supplied at init is kept when none is passed here\n        self.ast = BELAst(assertion=self.assertion, version=self.version)\n\n        return self\n\n    def canonicalize(self) -> \"BEL\":\n        \"\"\"\n        Takes an AST and returns a canonicalized BEL statement string.\n\n        Returns:\n            BEL: returns self\n        \"\"\"\n\n        # TODO Need to order position independent args\n\n        if self.ast:\n            self.ast.canonicalize()\n\n        return self\n\n    def decanonicalize(self) -> \"BEL\":\n        \"\"\"\n        Takes an AST and returns a decanonicalized BEL statement string.\n\n        Returns:\n            BEL: returns self\n        \"\"\"\n\n        if self.ast:\n            self.ast.decanonicalize()\n\n        return self\n\n    def orthologize(self, species_key: Key) -> \"BEL\":\n        \"\"\"Orthologize BEL AST to given species_key\n\n        Will return original entity (ns:value) if no ortholog found.\n\n        Args:\n            species_key (Key): species key to convert genes/rna/proteins into\n\n        Returns:\n            BEL: returns self\n        \"\"\"\n\n        if self.ast:\n            self.ast.orthologize(species_key)\n\n        return self\n\n    def to_string(self, fmt: str = \"medium\") -> str:\n        \"\"\"Convert AST object to string\n\n        Args:\n            fmt (str): short, medium, long formatted BEL statements\n                short = short function and short relation format\n                medium = short function and long relation format\n                long = long function and long relation format\n\n        Returns:\n            str: string version of BEL AST\n        \"\"\"\n\n        if self.ast:\n            return f\"{self.ast.to_string(fmt=fmt)}\"\n\n    def to_triple(self, fmt: str = \"medium\") -> dict:\n        \"\"\"Convert AST object to BEL triple\n\n        Args:\n            fmt (str): short, medium, long formatted BEL statements\n                short = short function and short relation format\n                medium = short function and long relation format\n                long = long function and long relation format\n\n        Returns:\n            dict: {'subject': <subject>, 'relation': <relations>, 'object': <object>}\n        \"\"\"\n\n        if self.ast:\n            return self.ast.to_triple(fmt=fmt)\n        else:\n            return {}\n\n    def print_tree(self) -> str:\n        \"\"\"Convert AST object to tree view of BEL AST\n\n        Returns:\n            printed tree of BEL AST\n        \"\"\"\n\n        if self.ast:\n            return self.ast.print_tree(ast_obj=self.ast)\n        else:\n            return \"\"\n\n    def dump(self) -> None:\n        \"\"\"Dump out the BEL object\"\"\"\n\n        # Standard Library\n        import textwrap\n\n        s = f\"\"\"\n        BEL Object dump:\n            version: {self.version}\n            assertion: {self.assertion.entire}\n            species: {self.ast.species}\n            ast: {self.ast.print_tree()}\n        \"\"\"\n\n        
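# textwrap.dedent strips the indentation the lines above share, e.g.\n        # textwrap.dedent(\"  a\\n  b\") == \"a\\nb\", so the dump prints flush-left.\n        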
print(textwrap.dedent(s))\n","repo_name":"belbio/bel","sub_path":"bel/lang/belobj.py","file_name":"belobj.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"23682807904","text":"import sys\nimport time\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Tuple\n\nimport click\nimport typer\nfrom typer.core import TyperGroup\n\nfrom wheel2deb import logger as logging\nfrom wheel2deb.build import build_all_packages, build_packages\nfrom wheel2deb.context import load_configuration\nfrom wheel2deb.debian import convert_wheels\nfrom wheel2deb.logger import enable_debug\n\nlogger = logging.getLogger(__name__)\n\n\nclass DefaultCommandGroup(TyperGroup):\n \"\"\"\n Make it so that calling wheel2deb without a subcommand\n is equivalent to calling the default subcommand.\n \"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any):\n self.default_command = \"default\"\n self.ignore_unknown_options = True\n super().__init__(*args, **kwargs)\n\n def parse_args(self, ctx: click.Context, args: List[str]) -> List[str]:\n if not args:\n args.insert(0, self.default_command)\n return super().parse_args(ctx, args)\n\n def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:\n if cmd_name.startswith(\"-\") and cmd_name not in self.commands:\n cmd_name = self.default_command\n ctx.default_command = True # type: ignore\n return super().get_command(ctx, cmd_name)\n\n def resolve_command(\n self, ctx: click.Context, args: List[str]\n ) -> Tuple[str | None, click.Command | None, List[str]]:\n cmd_name, cmd, args = super().resolve_command(ctx, args)\n if hasattr(ctx, \"default_command\") and cmd_name:\n args.insert(0, cmd_name)\n return cmd_name, cmd, args\n\n\noption_verbose: bool = typer.Option(\n False,\n \"--verbose\",\n \"-v\",\n envvar=\"WHEEL2DEB_VERBOSE\",\n help=\"Enable more logs.\",\n callback=lambda v: enable_debug(v),\n)\n\noption_configuration: Optional[Path] = typer.Option(\n None,\n \"--config\",\n \"-c\",\n envvar=\"WHEEL2DEB_CONFIG\",\n help=\"Path to configuration file.\",\n)\n\noption_output_directory: Path = typer.Option(\n \"output\",\n \"--output-dir\",\n \"-o\",\n envvar=\"WHEEL2DEB_OUTPUT_DIR\",\n help=\"Directory where debian source packages are generated and built.\",\n)\n\noption_include_wheels: Optional[List[str]] = typer.Option(\n None,\n \"--include\",\n \"-i\",\n envvar=\"WHEEL2DEB_INCLUDE_WHEELS\",\n help=\"Only wheels with matching names will be converted\",\n)\n\noption_exclude_wheels: Optional[List[str]] = typer.Option(\n None,\n \"--exclude\",\n \"-e\",\n envvar=\"WHEEL2DEB_EXCLUDE_WHEELS\",\n help=\"Wheels with matching names will not be converted\",\n)\n\noption_search_paths: List[Path] = typer.Option(\n [Path(\".\")],\n \"--search-path\",\n \"-x\",\n envvar=\"WHEEL2DEB_SEARCH_PATHS\",\n help=\"Only blueprints with matching names will be taken into account\",\n)\n\n\noption_workers_count: int = typer.Option(\n 4,\n \"--workers\",\n \"-w\",\n envvar=\"WHEEL2DEB_WORKERS_COUNT\",\n help=\"Max number of source packages to build in parallel\",\n)\n\noption_force_build: bool = typer.Option(\n False, \"--force\", help=\"Build source package even if .deb already exists\"\n)\n\napp = typer.Typer(cls=DefaultCommandGroup)\n\n\n@contextmanager\ndef print_summary_and_exit():\n start_time = time.monotonic()\n yield\n logger.summary(\n f\"\\nWarnings: {logging.get_warning_counter()}. 
\"\n f\"Errors: {logging.get_error_counter()}. \"\n f\"Elapsed: {round(time.monotonic() - start_time, 3)}s.\"\n )\n # the return code is the number of errors\n sys.exit(logging.get_error_counter())\n\n\ndef filter_wheels(\n search_paths: List[Path],\n include_wheels: List[str] | None,\n exclude_wheels: List[str] | None,\n) -> List[Path]:\n # list all python wheels in search paths\n files = []\n for path in [Path(path) for path in search_paths]:\n files.extend(path.glob(\"*.whl\"))\n files = sorted(files, key=lambda x: x.name)\n\n filenames = [f.name for f in files]\n if not include_wheels:\n include_wheels = filenames\n\n # remove excluded wheels\n if exclude_wheels:\n include_wheels = list(filter(lambda x: x not in exclude_wheels, include_wheels))\n\n return [file for file in files if file.name in include_wheels]\n\n\n@app.command(help=\"Generate and build source packages.\")\ndef default(\n verbose: bool = option_verbose,\n configuration_path: Optional[Path] = option_configuration,\n output_directory: Path = option_output_directory,\n search_paths: List[Path] = option_search_paths,\n include_wheels: Optional[List[str]] = option_include_wheels,\n exclude_wheels: Optional[List[str]] = option_exclude_wheels,\n workers_count: int = option_workers_count,\n force_build: bool = option_force_build,\n) -> None:\n with print_summary_and_exit():\n settings = load_configuration(configuration_path)\n wheel_paths = filter_wheels(search_paths, include_wheels, exclude_wheels)\n packages = convert_wheels(settings, output_directory, wheel_paths)\n build_packages([p.root for p in packages], workers_count, force_build)\n\n\n@app.command(help=\"Convert wheels in search paths to debian source packages\")\ndef convert(\n verbose: bool = option_verbose,\n configuration_path: Optional[Path] = option_configuration,\n output_directory: Path = option_output_directory,\n search_paths: List[Path] = option_search_paths,\n include_wheels: Optional[List[str]] = option_include_wheels,\n exclude_wheels: Optional[List[str]] = option_exclude_wheels,\n) -> None:\n with print_summary_and_exit():\n settings = load_configuration(configuration_path)\n wheel_paths = filter_wheels(search_paths, include_wheels, exclude_wheels)\n convert_wheels(settings, output_directory, wheel_paths)\n\n\n@app.command(help=\"Build debian packages from source packages.\")\ndef build(\n verbose: bool = option_verbose,\n output_directory: Path = option_output_directory,\n workers_count: int = option_workers_count,\n force_build: bool = option_force_build,\n) -> None:\n with print_summary_and_exit():\n build_all_packages(output_directory, workers_count, force_build)\n\n\ndef main() -> None:\n app()\n","repo_name":"upciti/wheel2deb","sub_path":"src/wheel2deb/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"81"} +{"seq_id":"18220590662","text":"import pandas\n\nfrom viewer.apps.analyzer.function.file import FileFunction\n\n\nclass DataFrameFunction(object):\n\n COMPANY_NAME_REPLACE_KEYWORD = '{COMPANY_NAME}'\n ORIGINAL_FOLDER = f'apps/analyzer/data/{COMPANY_NAME_REPLACE_KEYWORD}/original'\n PROCESSED_FOLDER = f'apps/analyzer/data/{COMPANY_NAME_REPLACE_KEYWORD}/processed'\n MERGED_FOLDER = f'apps/analyzer/data/{COMPANY_NAME_REPLACE_KEYWORD}/merged'\n MERGED_CSV_FOLDER = f'apps/analyzer/data/{COMPANY_NAME_REPLACE_KEYWORD}/merged_csv'\n\n @classmethod\n def get_data_frame_from_merged_pkl(cls, root_path, company_name):\n merged_pkl_path = 
FileFunction.get_merged_pkl_path(\n            root_path,\n            company_name\n        )\n        data_frame = pandas.read_pickle(merged_pkl_path)\n        return data_frame\n\n    @classmethod\n    def get_data_frame_from_pkl(cls, original_pkl_path):\n        return pandas.read_pickle(original_pkl_path)\n\n    @classmethod\n    def merge_japan_data(cls, root_path, company_name, merged_pkl_path_list):\n\n        data_frames = []\n        for merged_pkl_path in merged_pkl_path_list:\n            data_frame = pandas.read_pickle(merged_pkl_path)\n            data_frames.append(data_frame)\n\n        merged_data_frame = pandas.concat(data_frames).sort_values(by='date_time', ascending=True).reset_index()\n        merged_data_frame = merged_data_frame.groupby(['date_time']).sum()\n        merged_data_frame['date_time'] = merged_data_frame.index\n        # converted to a date once in order to drop the time component.\n        merged_data_frame['date'] = pandas.to_datetime(merged_data_frame['date_time'].dt.date, format='%Y/%m/%d %H:%M')\n        # unify as float and round.\n        merged_data_frame = cls.to_float_and_round(merged_data_frame)\n        # use a DatetimeIndex to enable the various time-series processing features.\n        merged_data_frame.set_index('date_time', inplace=True)\n        print(merged_data_frame)\n\n        merged_pkl_path = FileFunction.get_merged_pkl_path(\n            root_path,\n            company_name\n        )\n        merged_data_frame.to_pickle(merged_pkl_path)\n\n        return merged_pkl_path\n\n    @classmethod\n    def merge_ex_data(cls, processed_pkl_paths, root_path, company_name):\n\n        data_frames = []\n        for processed_pkl_path in processed_pkl_paths:\n            data_frame = pandas.read_pickle(processed_pkl_path)\n            data_frames.append(data_frame)\n\n        merged_data_frame = pandas.concat(data_frames).sort_values(by='date_time', ascending=True).reset_index()\n        del merged_data_frame['index']\n\n        merged_pkl_path = FileFunction.get_merged_pkl_path(\n            root_path,\n            company_name\n        )\n\n        print(company_name)\n        print(merged_data_frame[['date', 'time', 'demand', 'company', 'thermal', 'solar', 'total_supply_capacity']])\n\n        # convert to a date since it is easier to work with.\n        merged_data_frame['date'] = pandas.to_datetime(merged_data_frame['date'])\n\n        # treat NaN and the like as 0.\n        merged_data_frame = cls.__convert_null_value_to_zero(merged_data_frame)\n\n        # unify as float and round.\n        merged_data_frame = cls.to_float_and_round(merged_data_frame)\n\n        # use a DatetimeIndex to enable the various time-series processing features.\n        merged_data_frame.set_index('date_time', inplace=True)\n        merged_data_frame.to_pickle(merged_pkl_path)\n\n        return merged_pkl_path\n\n    @classmethod\n    def __convert_null_value_to_zero(cls, result):\n        # treat NaN and the like as 0 in every numeric column\n        value_columns = [\n            'demand', 'nuclear', 'thermal', 'hydro', 'geothermal', 'biomass',\n            'solar', 'solar_output_control', 'wind', 'wind_output_control',\n            'pumping', 'interconnection', 'total_supply_capacity'\n        ]\n        for column in value_columns:\n            result[column] = result[column].astype(str).str.replace('nan', '0')\n        return result\n\n    @classmethod\n    def to_float_and_round(cls, result):\n        # unify every numeric column as float and round to one decimal place\n        value_columns = [\n            'demand', 'nuclear', 'thermal', 'hydro', 'geothermal', 'biomass',\n            'solar', 'solar_output_control', 'wind', 'wind_output_control',\n            'pumping', 'interconnection', 'total_supply_capacity'\n        ]\n        for column in value_columns:\n            result[column] = result[column].astype(float).round(1)\n        return result\n\n    @classmethod\n    def create_date_and_time_from_datetime(cls, data_frame):\n        data_frame[\"split\"] = data_frame[\"date_time\"].str.split(\" \")\n        data_frame[\"date\"] = data_frame[\"split\"].str.get(0)\n        data_frame[\"time\"] = data_frame[\"split\"].str.get(1)\n        del data_frame[\"split\"]\n\n    @classmethod\n    def get_total_supply_capacity(cls, data_frame):\n\n        sum_target_fields = [\n            'nuclear',\n            'thermal',\n            'hydro',\n            'geothermal',\n            'biomass',\n            'solar',\n            'solar_output_control',\n            'wind',\n            'wind_output_control',\n            'pumping',\n            'interconnection'\n        ]\n        data_frame['total_supply_capacity'] = data_frame[sum_target_fields].sum(axis=1)\n        return data_frame['total_supply_capacity']\n\n    @classmethod\n    def to_mwh(cls, data_frame):\n        target_fields = [\n            'demand',\n            'nuclear',\n            'thermal',\n            'hydro',\n            'geothermal',\n            'biomass',\n            'solar',\n            'solar_output_control',\n            'wind',\n            'wind_output_control',\n            'pumping',\n            'interconnection',\n            'total_supply_capacity'\n        ]\n        transform_value = 10\n        for target_field in target_fields:\n            data_frame[target_field] = data_frame[target_field] * transform_value\n\n    @classmethod\n    def generate_data_time_field(cls, data_frame):\n        data_frame['date_time'] = data_frame['date'] + ' ' + data_frame['time']\n        data_frame['date_time'] = data_frame['date_time'].astype(str).str.replace('  ', ' ')\n        data_frame['date_time'] = pandas.to_datetime(data_frame['date_time'], format='%Y/%m/%d %H:%M')\n","repo_name":"TsJazz27Sumin/vedas","sub_path":"viewer/apps/analyzer/function/dataframe.py","file_name":"dataframe.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"13819255707","text":"\n# coding: utf-8\n\n# In[39]:\n\nimport scipy as sp\nfrom scipy.spatial.distance import squareform, pdist\n\n# parallelism..\nfrom multiprocessing import Pool\nfrom multiprocessing.dummy import Pool as ThreadPool \nfrom functools import partial\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import StratifiedKFold\nimport time\nimport math\n\n# for tests\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import zero_one_loss, accuracy_score\n\n# classifier 2.2\nimport parzenFunction as pz\n\n# plots\nimport matplotlib\n\n# distribution plot\nimport scipy.stats as stats\nimport pylab as pl\ndef lerDados(base):\n    if (base == 1):\n        dfac = pd.read_csv('dados/mfeat-fac', delim_whitespace=True, header=None)\n        return dfac, \"mfeat-fac\"\n    elif (base == 2):\n        dfou = 
pd.read_csv('dados/mfeat-fou', delim_whitespace=True, header=None)\n        return dfou, \"mfeat-fou\"\n    elif (base == 3):\n        dkar = pd.read_csv('dados/mfeat-kar', delim_whitespace=True, header=None)\n        return dkar, \"mfeat-kar\"\n\n\n# data import\n#dfac = pd.read_csv('dados/mfeat-fac', delim_whitespace=True, header=None)\n#dfou = pd.read_csv('dados/mfeat-fou', delim_whitespace=True, header=None)\n#dkar = pd.read_csv('dados/mfeat-kar', delim_whitespace=True, header=None)\n\n\n\n# In[40]:\n\n# creates an array with 2000 elements, where the first 200 elements have class 0,\n# the next 200 elements have class 1, ...\ndef preparaClasses():\n    classes = np.array([])\n    for i in range(2000):\n        classes = np.append(classes, int(i/200))\n    classes[198:203]\n    return classes\n\n\n# In[41]:\n\n# sorts the matrix by the values of the array\n# the number of matrix rows must equal the number of array elements\n# based on the pandas tutorial\n# http://nbviewer.jupyter.org/urls/bitbucket.org/hrojas/learn-pandas/raw/master/lessons/02%20-%20Lesson.ipynb\n#def ordenaMatrizPorClasse(matriz,array):\n#    c = pd.DataFrame(matriz)\n#    c['classe'] = array\n#    Sorted = c.sort_values(['classe'], ascending=True)\n#    #del Sorted['novacoluna'] # remove a coluna\n#    return Sorted\n\n\n# In[42]:\n\ndef classificadorJanelaParzen(dadosTreino, dadosTeste):\n\n    treino = np.array(dadosTreino)\n    teste = np.array(dadosTeste)\n    colunaClasse = dadosTreino.shape[1] - 1\n\n    # split the training set by class (0..9) and drop the class column\n    treinoPorClasse = [treino[treino[:, colunaClasse] == c][:, :colunaClasse] for c in range(10)]\n\n    listaProbabilidadesTeste = []\n    for i in range(len(teste)):\n        # drop the last column, which is the class column\n        linhaTeste = teste[i, :colunaClasse]\n\n        # classification...\n        resultado = [pz.parzen(linhaTeste, treinoClasse) for treinoClasse in treinoPorClasse]\n        listaProbabilidadesTeste.append(resultado)\n\n    return listaProbabilidadesTeste\n\n\n# In[43]:\n\ndef classificadorJanelaParzenClassificar(dadosTreino, classesTreino, arrayDadosClassificar):\n    treino = np.array(dadosTreino)\n    colunaClasse = dadosTreino.shape[1] - 1\n\n    # split the training set by class and drop the class column\n    treinoPorClasse = [treino[treino[:, colunaClasse] == c][:, :colunaClasse] for c in range(10)]\n\n    # classification...\n    resultado = [pz.parzen(arrayDadosClassificar, treinoClasse) for treinoClasse in treinoPorClasse]\n\n    return resultado\n\n\n# In[44]:\n\ndef gerarGraficoResultado(nomeDados, resultado, estimativaPontual):\n    resultadoOrdenado = np.sort(resultado)\n    plt.figure(figsize=(12,7))\n    plt.axis([0,300,0,1])\n    plt.plot(resultado)\n    plt.ylabel(\"Precisão (accuracy)\")\n    plt.xlabel(\"Folds\")\n    plt.title(\"Base de dados: \" + nomeDados)\n    plt.show()\n\n\n# In[45]:\n\n# adds a column to the matrix holding the classes\ndef insereColunaClasses(matriz, classes):\n    indiceColunaClasses = len(matriz.columns)\n    matriz[indiceColunaClasses] = classes\n    return matriz, indiceColunaClasses\n\n\n# In[46]:\n\n# shuffles the data\n# builds 10 folds\n# trains, classifies and computes the accuracy\ndef core(nomeDados, matrizDados, classes, rodadas=1):\n\n    saida = []\n    matrizDados, indiceColunaClasses = insereColunaClasses(matrizDados.copy(), classes)\n    print(\"[\" + nomeDados + \"] Número de Rodadas = \" + str(rodadas))\n\n    for i in range(rodadas):\n\n        # shuffle the data matrix and the classes together\n        dadosEmbaralhados, classesEmbaralhadas = shuffle(matrizDados, classes, random_state=i)\n\n        # 10 folds\n        skf = StratifiedKFold(n_splits=10)\n        folds = skf.split(dadosEmbaralhados, classesEmbaralhadas)\n        z = 0\n\n        for indicesTreino, indicesTeste in folds:\n\n            dadosTreino = np.array(dadosEmbaralhados.iloc[indicesTreino])\n            dadosTeste = np.array(dadosEmbaralhados.iloc[indicesTeste])\n            classesTreino = np.array(classesEmbaralhadas[indicesTreino])\n            classesTeste = np.array(classesEmbaralhadas[indicesTeste])\n\n            # check that the class column still matches the shuffled classes\n            for a in range(len(dadosTreino)):\n                if (dadosTreino[a][indiceColunaClasses] != classesTreino[a]):\n                    print(\"Erro!\")\n            for b in range(len(dadosTeste)):\n                if (dadosTeste[b][indiceColunaClasses] != classesTeste[b]):\n                    print(\"Erro!\")\n\n            # classificadorJanelaParzen takes (treino, teste) and returns per-class\n            # probabilities; the accuracy comes from comparing the argmax\n            # predictions with the true classes\n            probabilidades = classificadorJanelaParzen(dadosTreino, dadosTeste)\n            predicoes = np.argmax(probabilidades, axis=1)\n            accuracy = accuracy_score(classesTeste, predicoes)\n            saida.append(accuracy)\n            print(\"[\" + nomeDados + \"] Rodada \" + str(i) + \", fold \"+ str(z) + \" concluido. Accuracy = \" + str(accuracy))\n            z = z + 1\n\n    # POINT ESTIMATE\n    media = np.mean(saida)\n    return saida, media\n\n# Classify and show the result\ndef preCore(nomeDados, dados, classes, rodadas):\n\n    # measure time\n    inicio = time.time()\n    print(\"[\" + nomeDados + \"] Iniciando classificação \")\n\n    # run the classification\n    resultado, estimativaPontual = core(nomeDados, dados, classes, rodadas)\n\n    # generate the plot\n    gerarGraficoResultado(nomeDados, resultado, estimativaPontual)\n    print(\"[\" + str(nomeDados) + \"] *** Estimativa Pontual (média) = \" + str(estimativaPontual))\n\n    # print the data\n    print(\"[\" + str(nomeDados) + \"] *** Resultados: \")\n    print(resultado)\n    print(\"-----------------------\")\n\n    # compute the processing time\n    fim = time.time()\n    total = fim - inicio\n    print(\"\\n[\" + nomeDados + \"] Fim - Tempo de Execução = \" + str(total) + \"\\n\")\n\n    return resultado, estimativaPontual\n\ndef iniciaParalelismo(rodadas, base):\n    classes = preparaClasses()\n    dados, nome = lerDados(base)\n    print(\"inicializando processamento paralelo da matriz '\" + str(nome) + \"', com dimensões = \" + str(dados.shape))\n    return preCore(nome, dados, classes, rodadas)\n\n# run training and testing, showing the accuracy plot\ndef treinoETeste():\n    bases = [1,2,3]\n    pool = Pool(4)\n    rodadas = 30\n    func = partial(iniciaParalelismo, rodadas)\n    pool.map(func, bases)\n    pool.close()\n    pool.join()\n\n\n# def classificarUmElemento(base, indiceClassificar):\n#     rodadas = 2\n#     classes = preparaClasses()\n#     resultado = classificadorJanelaParzenClassificar(base,classes,indiceClassificar)\n#     print(resultado)\n#     print(\"média\")\n#     mediaColunas = np.mean(resultado, axis=0)\n#     return mediaColunas\n#     #print(mediaColunas)\n#","repo_name":"mrcferro/am","sub_path":"questao-02iii/parzen.py","file_name":"parzen.py","file_ext":"py","file_size_in_byte":10815,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25518560668","text":"from sys import stdin\ninput = stdin.readline\n\ndef solution(A):\n    A += [1]\n    fibo = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025]\n    frog = [0 for _ in range(len(A))]\n\n    for i in range(len(A)):\n        if A[i]==1:\n            for j in range(len(fibo)):\n                if i-fibo[j]<-1: continue\n                if i-fibo[j]==-1: frog[i] = 1\n                elif 0<=i-fibo[j]<len(frog) and frog[i-fibo[j]]:\n                    if frog[i]: frog[i] = min(frog[i], frog[i-fibo[j]]+1)\n                    else: frog[i] = frog[i-fibo[j]]+1\n\n    if frog[-1]: return frog[-1]\n    else: return -1\n","repo_name":"sixinchnails/Codility","sub_path":"Medium/FibFrog.py","file_name":"FibFrog.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43302516587","text":"from typing import List\nfrom functools import lru_cache  # used by maxProfitDivideAndConq below\n\n\nclass Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        profit = 0\n        bought = None\n        op = 'buy'\n        if len(prices) == 0:\n            return 0\n        n = prices[0]\n        for i in range(1, len(prices)):\n            if op == 'buy' and n < prices[i]:\n                bought 
= prices[i - 1]\n op = 'sell'\n elif op == 'sell' and n > prices[i]:\n sold = prices[i - 1]\n profit += sold - bought\n bought = None\n op = 'buy'\n n = prices[i]\n if bought is not None:\n sold = prices[-1]\n profit += sold - bought\n return profit\n\n def maxProfitDivideAndConq(self, prices: List[int]) -> int:\n # 10.11.2021\n @lru_cache()\n def rec(start: int, end: int) -> int:\n nonlocal prices\n N = (end + 1) - start\n mid = (end + start) // 2\n if N <= 2:\n return max(0, prices[end] - prices[start])\n return rec(start, mid) + rec(mid, end)\n\n return rec(0, len(prices) - 1)\n","repo_name":"vaiol/leetcode2","sub_path":"src/122/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9660682483","text":"from torchvision import datasets, transforms\nimport os\n\n\ndef preprocess_data() -> datasets.ImageFolder:\n # Normalize images with values calculated from ImageNet\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n # Apply transforms\n transform = transforms.Compose([\n transforms.Resize((64, 64)), # Resize the short side of the image to 64 keeping aspect ratio\n transforms.ToTensor(), # Convert image to PyTorch tensor\n normalize, # Normalize image\n ])\n\n train_dataset = datasets.ImageFolder(root=os.path.join(os.getcwd(), 'data/train'), transform=transform)\n\n return train_dataset\n\n\nif __name__ == \"__main__\":\n preprocess_data()\n","repo_name":"VanekPetr/ResNet-9","sub_path":"data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18205179827","text":"# tim sort 참고, flake8 코드 스타일 적용\nimport time\n\n\ndef insertion_sort(arr, start, end): # in-place / stable\n for i in range(start + 1, end):\n key = arr[i]\n j = i - 1\n\n while j >= start and arr[j] > key:\n arr[j + 1] = arr[j]\n j -= 1\n arr[j + 1] = key\n\n\ndef merge(arr, first, midpoint, last):\n merge_arr = []\n start = first\n mid = midpoint\n\n while start < midpoint and mid < last:\n if arr[start] < arr[mid]:\n merge_arr.append(arr[start])\n start += 1\n else:\n merge_arr.append(arr[mid])\n mid += 1\n\n while start < midpoint:\n merge_arr.append(arr[start])\n start += 1\n\n while mid < last:\n merge_arr.append(arr[mid])\n mid += 1\n\n for i in range(first, last):\n arr[i] = merge_arr[i - first]\n\n\ndef merge_sort(arr, first, last):\n if last - first <= 32:\n insertion_sort(arr, first, last)\n else:\n midpoint = (first + last) // 2\n merge_sort(arr, first, midpoint)\n merge_sort(arr, midpoint, last)\n merge(arr, first, midpoint, last)\n\n\n# arr = [i for i in range(1000000, 0, - 1)]\narr = [i for i in range(100_000, 0, -1)]\nstart = time.time()\nmerge_sort(arr, 0, len(arr))\nend = time.time()\n\nprint(arr)\nprint(end - start)\n","repo_name":"heejun32/Algorithm","sub_path":"Study/merge sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34956538817","text":"import os\nimport sys\nimport json\nimport numpy as np\nimport skimage.draw\nfrom skimage.filters import unsharp_mask\n# import imgaug # should augment this improt as well haha\nimport time\n# Root directory of project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN and 
ESRGAN\nsys.path.append(ROOT_DIR)  # To find local version of the library\nsys.path.append(os.path.join(os.path.abspath('.'), 'hent-AI/ColabESRGAN/'))\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n# It's too late to undo this now\nfrom cv2 import imshow, waitKey, multiply, add, erode, VideoCapture, Canny, cvtColor,COLOR_GRAY2RGB, imdecode, CAP_PROP_FRAME_COUNT, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH, CAP_PROP_FPS, VideoWriter, VideoWriter_fourcc, resize, INTER_LANCZOS4, INTER_AREA, GaussianBlur, filter2D, bilateralFilter, blur\nimport ColabESRGAN.test\nimport shutil  # used by clean_work_dirs to remove working directories\n# Adaptive mosaic granularity\nfrom green_mask_project_mosaic_resolution import get_mosaic_res\n\nfrom PIL import Image\n\nfrom NoCensoredRegionsFoundError import NoCensoredRegionsFoundError\n\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Path to trained weights\nWEIGHTS_PATH = os.path.join(ROOT_DIR, \"weights.h5\")\n\n# taking this from hentai to avoid import\nclass HentaiConfig(Config):\n    \"\"\"Configuration for training on the toy dataset.\n    Derives from the base Config class and overrides some values.\n    \"\"\"\n    # Give the configuration a recognizable name\n    NAME = \"hentai\"\n\n    # We use a GPU with 12GB memory, which can fit two images.\n    # Adjust down if you use a smaller GPU.\n    IMAGES_PER_GPU = 1\n\n    # Number of classes (including background)\n    NUM_CLASSES = 1 + 1 + 1\n\n    # Number of training steps per epoch, equal to dataset train size\n    STEPS_PER_EPOCH = 1490\n\n    # Skip detections with < 75% confidence\n    DETECTION_MIN_CONFIDENCE = 0.75\n\n# Detector class. Handles detection and potentially esr decensoring. For now, will house an ESR instance at startup\nclass Detector():\n    # at startup, dont create model yet\n    def __init__(self, weights_path):\n        class InferenceConfig(HentaiConfig):\n            # Set batch size to 1 since we'll be running inference on\n            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n            GPU_COUNT = 1\n            IMAGES_PER_GPU = 1\n        self.config = InferenceConfig()\n\n        self.weights_path = weights_path\n\n        try:\n            self.out_path = os.path.join(os.path.abspath('.'), \"ESR_temp/ESR_out/\")\n            self.out_path2 = os.path.join(os.path.abspath('.'), \"ESR_temp/ESR_out2/\")\n            self.temp_path = os.path.join(os.path.abspath('.'), \"ESR_temp/temp/\")\n            self.temp_path2 = os.path.join(os.path.abspath('.'), \"ESR_temp/temp2/\")\n            self.fin_path = os.path.join(os.path.abspath('.'), \"ESR_output/\")\n        except:\n            print(\"ERROR in Detector init: Cannot find ESR_out or some dir within.\")\n            return\n        # Create esrgan instance for detector instance\n        try:\n            self.esr_model_path = os.path.join(os.path.abspath('.'), \"4x_FatalPixels_340000_G.pth\")\n        except:\n            print(\"ERROR in Detector init: ESRGAN model not found, make sure you have 4x_FatalPixels_340000_G.pth in this directory\")\n            return\n        # Scan for cuda compatible GPU for ESRGAN. Mask-RCNN *should* automatically use a GPU if available.\n        self.hardware = 'cpu'\n\n    # Clean out temp working images from all directories in ESR_temp. 
Code from https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder\n def clean_work_dirs(self):\n print(\"Cleaning work dirs...\")\n folders = [self.out_path, self.out_path2, self.temp_path, self.temp_path2]\n for folder in folders:\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('ERROR in clean_work_dirs: Failed to delete %s. Reason: %s' % (file_path, e))\n\n # Make sure this is called before using model weights\n def load_weights(self):\n print('Creating model, Loading weights...', end=' ')\n self.model = modellib.MaskRCNN(mode=\"inference\", config=self.config,\n model_dir=DEFAULT_LOGS_DIR)\n try:\n self.model.load_weights(self.weights_path, by_name=True)\n print(\"Weights loaded\")\n except Exception as e:\n print(\"ERROR in load_weights: Model Load. Ensure you have your weights.h5 file!\", end=' ')\n print(e)\n\n \"\"\"Apply cover over image. Based off of Mask-RCNN Balloon color splash function\n image: RGB image [height, width, 3]\n mask: instance segmentation mask [height, width, instance count]\n Returns result covered image.\n \"\"\"\n def apply_cover(self, image, mask, dilation):\n # Copy color pixels from the original color image where mask is set\n green = np.zeros([image.shape[0], image.shape[1], image.shape[2]], dtype=np.uint8)\n green[:,:] = [0, 255, 0]\n\n if mask.shape[-1] > 0:\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) < 1)\n # dilate mask to ensure proper coverage\n mimg = mask.astype('uint8')*255\n kernel = np.ones((dilation,dilation), np.uint8)\n mimg = erode(src=mask.astype('uint8'), kernel=kernel, iterations=1) #\n # dilation returns image with channels stripped (?!?). 
Reconstruct image channels\n mask_img = np.zeros([mask.shape[0], mask.shape[1],3]).astype('bool')\n mask_img[:,:,0] = mimg.astype('bool')\n mask_img[:,:,1] = mimg.astype('bool')\n mask_img[:,:,2] = mimg.astype('bool')\n\n cover = np.where(mask_img.astype('bool'), image, green).astype(np.uint8)\n else:\n # error case, return image\n cover = image\n return cover, mask\n\n # Similar to above function, except it places the decensored image over the original image.\n def splice(self, image, mask, gan_out):\n if mask.shape[-1] > 0:\n mask = (np.sum(mask, -1, keepdims=True) < 1)\n mask = 1 - mask # invert mask for blending\n mask = mask.astype('uint8')*255\n mask = GaussianBlur(mask, (29,29), 0)\n # mask_img = np.zeros([mask.shape[0], mask.shape[1],3]).astype('uint8')\n # for i in range(3):\n # mask_img[:,:,i] = mask\n mask_img = mask.astype(float) / 255\n # proper blending courtesy of https://www.learnopencv.com/alpha-blending-using-opencv-cpp-python/\n fg_o = gan_out.astype(float)\n bg_o = image.astype(float)\n fg = np.zeros([mask.shape[0], mask.shape[1],3]).astype(float)\n bg = np.zeros([mask.shape[0], mask.shape[1],3]).astype(float) # create foreground and background images with proper rgb channels\n cover = image\n for i in range(3):\n # Multiply the fg with the mask matte\n fg[:,:,i] = multiply(mask_img, fg_o[:,:,i])\n # Multiply the bg with ( 1 - mask_img )\n bg[:,:,i] = multiply(1.0 - mask_img, bg_o[:,:,i])\n # Add the masked fg and bg.\n cover[:,:,i] = add(fg[:,:,i], bg[:,:,i])\n else:\n #error case, return image\n cover=image\n return cover\n\n # function to handle all of the esrgan stuff\n def resize_GAN(self, img_path, img_name, is_video=False):\n # Attempt to obtain image\n try:\n image = skimage.io.imread(img_path) # problems with strange shapes\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image) # convert to rgb if greyscale\n if image.shape[-1] == 4:\n image = image[..., :3] # strip alpha channel\n except Exception as e:\n print(\"ERROR in resize_GAN: Image read. Skipping. image_path=\", img_path)\n print(e)\n return\n # Calculate mosaic granularity.\n granularity = get_mosaic_res(np.array(image))\n if granularity < 10: #TODO: implement adaptive granularity by weighted changes\n print(\"Granularity of image was less than threshold at \", granularity)\n granularity = 10\n # Resize image down\n try:\n mini_img = resize(image, (int(image.shape[1]/granularity), int(image.shape[0]/granularity)), interpolation=INTER_AREA) # TODO: experiment with interpolations\n # After resize, run bilateral filter to keep colors coherent\n file_name = self.temp_path + img_name[:-4] + '.png'\n skimage.io.imsave(file_name, mini_img)\n except Exception as e:\n print(\"ERROR in resize_GAN: resize. Skipping. image_path=\",img_path, e)\n return\n # Now run ESRGAN inference\n gan_img_path = self.out_path + img_name[:-4] + '.png'\n self.esrgan_instance.run_esrgan(test_img_folder=file_name, out_filename=gan_img_path, mosaic_res=granularity)\n\n # Runs hent-AI detection and splice. Mosaic only.\n def ESRGAN(self, img_path, img_name, is_video=False):\n try:\n image = skimage.io.imread(img_path) # problems with strange shapes\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image) # convert to rgb if greyscale\n if image.shape[-1] == 4:\n image = image[..., :3] # strip alpha channel\n except Exception as e:\n print(\"ERROR in detector.ESRGAN: Image read. Skipping. 
image_path=\", img_path)\n print(e)\n return\n # Run detection first\n r = self.model.detect([image], verbose=0)[0]\n # Remove bars from detection; class 1\n\n if len(r[\"scores\"]) == 0:\n print(\"Skipping image with no detection\")\n return\n remove_indices = np.where(r['class_ids'] != 2)\n new_masks = np.delete(r['masks'], remove_indices, axis=2)\n\n # load image from esrgan\n gan_img_path = self.out_path + img_name[:-4] + '.png'\n gan_image = skimage.io.imread(gan_img_path)\n gan_image = resize(gan_image, (image.shape[1], image.shape[0]))\n # Splice newly enhanced mosaic area over original image\n fin_img = self.splice(image, new_masks, gan_image)\n try:\n # Save output, now force save as png\n file_name = self.fin_path + img_name[:-4] + '.png'\n skimage.io.imsave(file_name, fin_img)\n except Exception as e:\n print(\"ERROR in ESRGAN: Image write. Skipping. image_path=\", img_path, e)\n\n # ESRGAN folder running function\n def run_ESRGAN(self, in_path = None, is_video = False, force_jpg = True):\n assert in_path\n\n # Parse directory for files.\n img_list = []\n for file in os.listdir(in_path):\n try:\n if file.endswith('.png') or file.endswith('.PNG') or file.endswith(\".jpg\") or file.endswith(\".JPG\") or file.endswith(\".mp4\") or file.endswith(\".avi\"):\n img_list.append((in_path + '/' + file, file))\n except Exception as e:\n print(\"ERROR in run_ESRGAN: File parsing. file=\", file, e)\n # begin ESRGAN on every image. Create esrgan instance too.\n star = time.perf_counter()\n self.esrgan_instance = ColabESRGAN.test.esrgan(model_path=self.esr_model_path, hw=self.hardware)\n for img_path, img_name in img_list:\n self.resize_GAN(img_path=img_path, img_name=img_name, is_video=is_video)\n # destroy esrgan model. Create hent-AI model.\n # self.esrgan_instance = []\n del self.esrgan_instance\n for img_path, img_name in img_list:\n self.ESRGAN(img_path=img_path, img_name=img_name, is_video=is_video)\n fin = time.perf_counter()\n total_time = fin-star\n print(\"Completed ESRGAN detection and decensor in {:.4f} seconds\".format(total_time))\n self.clean_work_dirs() #NOTE: DISABLE ME if you want to keep the images in the working dirs\n #TODO: maybe unload hent-AI tf model here\n\n def detect_and_cover(self, censored_img, dilation=0):\n # Run on Image\n image = np.array(censored_img) # skimage.io.imread(image_path) # problems with strange shapes\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image) # convert to rgb if greyscale\n if image.shape[-1] == 4:\n image = image[..., :3] # strip alpha channel\n\n # Detect objects\n try:\n r = self.model.detect([image], verbose=0)[0]\n except Exception as e:\n print(\"ERROR in detect_and_cover: Model detection.\",e)\n return\n # Remove unwanted class, code from https://github.com/matterport/Mask_RCNN/issues/1666\n remove_indices = np.where(r['class_ids'] != 1) # remove mosaic: class 1\n new_masks = np.delete(r['masks'], remove_indices, axis=2)\n\n cov, mask = self.apply_cover(image, new_masks, dilation)\n\n if mask.size == 0:\n raise NoCensoredRegionsFoundError(\"No censored regions detected.\")\n\n return Image.fromarray(cov.astype(\"uint8\"))\n\n # Function for file parsing, calls the aboven detect_and_cover\n def run_on_folder(self, input_folder, output_folder, is_video=False, orig_video_folder=None, is_mosaic=False, dilation=0):\n assert input_folder\n assert output_folder # replace with catches and popups\n\n self.esrgan_instance = [] # rare case where esrgan instance not destroyed but new action started, catch it here\n if dilation < 
0:\n        print("ERROR: dilation value < 0")\n        return\n        print("Will expand each mask by {} pixels".format(dilation/2))\n\n        file_counter = 0\n        # obtain inputs from the input folder\n        img_list = []\n        for file in os.listdir(str(input_folder)):\n            file_s = str(file)\n            try:\n                if file_s.endswith('.png') or file_s.endswith('.PNG') or file_s.endswith(".jpg") or file_s.endswith(".JPG"):\n                    img_list.append((input_folder + '/' + file_s, file_s))\n            except:\n                print("ERROR in run_on_folder: File parsing. file=", file_s)\n\n        # run detection on every input and save the covered output to the output folder\n        for img_path, img_name in img_list:\n            star = time.perf_counter()\n            # detect_and_cover() takes an image array plus a dilation value and returns a PIL image\n            cover_img = self.detect_and_cover(skimage.io.imread(img_path), dilation=dilation)\n            cover_img.save(output_folder + '/' + img_name)\n            fin = time.perf_counter()\n            total_time = fin-star\n            print('Detection on image', file_counter,'of', len(img_list),'finished in {:.4f} seconds'.format(total_time))\n            file_counter += 1\n","repo_name":"erogaki-dev/hent-AI-erogaki-wrapper","sub_path":"src/wrapper_detector.py","file_name":"wrapper_detector.py","file_ext":"py","file_size_in_byte":15054,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"19198031834","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass DataQualityOperator(BaseOperator):\n\n    ui_color = '#89DA59'\n\n    @apply_defaults\n    def __init__(self,\n                 redshift_conn_id="",\n                 tables={},\n                 *args, **kwargs):\n\n        super(DataQualityOperator, self).__init__(*args, **kwargs)\n        self.tables = tables\n        self.redshift_conn_id = redshift_conn_id\n\n    def execute(self, context):\n        """\n        The data quality operator is used to run checks on the data itself. \n        The operator's main functionality is to receive one or more SQL based test cases along with the expected results and execute the tests. \n        For each test, the actual result is compared against the expected result; if they do not match, the operator raises an exception and the task retries and eventually fails.\n        It uses the following parameters:\n        - redshift_conn_id : contains the connection details to the data warehouse in Amazon Redshift (from a connection defined in Airflow)\n        - retries : number of retries before raising an exception\n        - tables : a dictionary containing table names as keys and a list of tests to run on these tables as values associated to the keys.\n        """ \n        self.log.info('Testing data quality')\n        # connecting to Redshift\n        redshift_hook = PostgresHook(self.redshift_conn_id)\n\n        # for each table in the dictionary\n        for table in self.tables:\n            records = redshift_hook.get_records(f"SELECT COUNT(*) FROM {table}")\n            # check that there are records in the table\n            if len(records) < 1 or len(records[0]) < 1:\n                raise ValueError(f"Data quality check failed. {table} returned no results")\n            num_records = records[0][0]\n            # check that there are rows in the table\n            if num_records < 1:\n                raise ValueError(f"Data quality check failed. {table} contained 0 rows")\n            # for each column with the NOT NULL constraint\n            columnList = self.tables[table][0]\n            for column in columnList:\n                nullCount = redshift_hook.get_records(f"SELECT COUNT(*) FROM {table} WHERE {column} IS NULL")\n                # check the NOT NULL constraint \n                if len(nullCount) < 1 or nullCount[0][0] > 0:\n                    raise ValueError(f"Data quality check failed. 
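# A minimal, self-contained sketch of the feathered alpha-blend that the splice() method above
# performs: blur the binary mask into a matte, then mix the GAN output and the original image
# per pixel. It assumes only that OpenCV and NumPy are installed; the arrays are synthetic stand-ins.
import cv2
import numpy as np

h, w = 64, 64
fg = np.full((h, w, 3), 255, dtype=np.uint8)   # stand-in for the upscaled GAN output
bg = np.zeros((h, w, 3), dtype=np.uint8)       # stand-in for the original image
mask = np.zeros((h, w), dtype=np.uint8)
mask[16:48, 16:48] = 255                       # detected region to paste over

matte = cv2.GaussianBlur(mask, (29, 29), 0).astype(float) / 255.0  # feather the edges
matte = matte[..., None]                       # broadcast the matte over the 3 channels
blended = (matte * fg + (1.0 - matte) * bg).astype(np.uint8)
print(blended.shape, blended.dtype)            # (64, 64, 3) uint8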
{column} column contains {nullCount[0][0]} NULL values")\n            # for each query in the query/expected result dictionary\n            queryResultDictionary = self.tables[table][1]\n            for query in queryResultDictionary:\n                # run the query and get the number of rows for the result\n                count = redshift_hook.get_records(query)[0][0]\n                # verify if the number of rows matches the expected number of rows.\n                if count != queryResultDictionary[query]:\n                    raise ValueError(f"Data quality check failed. Query {query} contained {count} rows. {queryResultDictionary[query]} was expected")\n            self.log.info(f"Data quality on table {table} check passed with {records[0][0]} records")\n    ","repo_name":"nadirl00/Data-Engineering-Project-5-Data-Pipelines-with-Airflow","sub_path":"plugins/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"38544030245","text":"import inspect\nfrom typing import Any, Sequence\nfrom jax import numpy as jnp\nimport onnx\nfrom .onnx_utils import contain_subgraph\nfrom .onnx_utils import get_graph_input\n\n\ndef convert_onnx(attr_proto: onnx.AttributeProto) -> Any:\n  """Convert an ONNX attribute to a Python object.\n\n  Args:\n    attr_proto: An ONNX AttributeProto object.\n\n  Returns:\n    A Python object corresponding to the attribute value.\n\n  Raises:\n    ValueError: If the attribute type is not supported.\n  """\n  if attr_proto.HasField('f'):\n    return attr_proto.f\n  elif attr_proto.HasField('i'):\n    return attr_proto.i\n  elif attr_proto.HasField('s'):\n    return str(attr_proto.s, 'utf-8')\n  elif attr_proto.HasField('t'):\n    return attr_proto.t  # this is a proto!\n  elif attr_proto.HasField('g'):\n    return attr_proto.g\n  elif attr_proto.floats:\n    return tuple(attr_proto.floats)\n  elif attr_proto.ints:\n    return tuple(attr_proto.ints)\n  elif attr_proto.strings:\n    str_list = tuple(map(lambda x: str(x, 'utf-8'), list(attr_proto.strings)))\n    return str_list\n  elif attr_proto.HasField('sparse_tensor'):\n    return attr_proto.sparse_tensor\n  else:\n    raise ValueError('Unsupported ONNX attribute: {}'.format(attr_proto))\n\n\nclass OnnxNode:\n  """A class that wraps an ONNX NodeProto as an OnnxNode object.\n\n  Attributes:\n    name (str): The name of the node.\n    op_type (str): The type of the operation performed by the node.\n    domain (str): The domain of the node.\n    attrs (dict): A dictionary of attributes for the node, where the keys are\n      the attribute names and the values are the attribute values.\n    attrs_dict (dict): A dict of the node's attributes, used as keyword\n      arguments for the jax onnx implementation.\n    inputs (list): A list of the node's input names.\n    subgraph_inputs (list): A list of the input names of subgraphs.\n    outputs (list): A list of the node's output names.\n    node_proto (onnx.NodeProto): The underlying ONNX NodeProto object.\n    context_graph (Any): The graph context that contains the node.\n  """\n\n  def __init__(self, node: onnx.NodeProto, context_graph: Any = None):\n    """Creates an OnnxNode object from an ONNX NodeProto object.\n\n    Args:\n      node (onnx.NodeProto): The ONNX NodeProto object to wrap.\n      context_graph (Any): The graph context that contains the node.\n    """\n    self.name: str = str(node.name)\n    self.op_type: str = str(node.op_type)\n    self.domain: str = str(node.domain)\n    self.attrs: dict[str, Any] = dict(\n        [(attr.name, convert_onnx(attr)) for attr in node.attribute]\n    )\n    self.attrs_dict: dict[str, Any] = {}\n    self.inputs: 
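# Illustrative sketch only: a hypothetical `tables` argument shaped the way execute() above
# expects it -- for each table, element [0] lists the NOT NULL columns and element [1] maps a
# check query to the scalar value (first column of the first row) the query should return.
# Table and column names here are made up for the example.
example_tables = {
    "songplays": (
        ["playid", "start_time", "userid"],
        {"SELECT COUNT(*) FROM songplays WHERE userid IS NULL": 0},
    ),
    "users": (
        ["userid", "first_name"],
        {},
    ),
}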
list[str] = list(node.input)\n    self.subgraph_inputs: list[str] = []\n    self.outputs: list[str] = list(node.output)\n    self.node_proto: onnx.NodeProto = node\n    self.context_graph: Any = context_graph\n\n    # For operators that involve control flow, OnnxNode is defined to be\n    # a self-contained operator, different from Onnx.NodeProto.\n    # The inputs to the subgraphs are added to the inputs to this parent\n    # control flow operator.\n    if contain_subgraph(node):\n      for a in node.attribute:\n        if a.HasField('g'):\n          subg_inputs = get_graph_input(a.g)\n          self.subgraph_inputs.extend(subg_inputs)\n\n  @property\n  def len_inputs(self) -> int:\n    """The number of input tensors of the ONNX node."""\n    return len(self.inputs)\n\n  @property\n  def len_outputs(self) -> int:\n    """The number of output tensors of the ONNX node."""\n    return len(self.outputs)\n\n  def get_constant_node_value(self) -> Any:\n    """Returns the value of the constant node."""\n    assert self.node_proto.op_type == 'Constant', self.node_proto.op_type\n    result = None\n    attr_to_dtype = {\n        'value_int': jnp.int64,\n        'value_ints': jnp.int64,\n        'value_float': jnp.float32,\n        'value_floats': jnp.float32,\n    }\n\n    matched = 0\n    if 'value_string' in self.attrs:\n      result = self.attrs['value_string']\n      matched = matched + 1\n    elif 'value_strings' in self.attrs:\n      result = self.attrs['value_strings']\n      matched = matched + 1\n    elif 'value' in self.attrs:\n      result = onnx.numpy_helper.to_array(self.attrs['value'])\n      matched = matched + 1\n    else:\n      for item in attr_to_dtype:\n        if item in self.attrs:\n          result = jnp.array(self.attrs[item], dtype=attr_to_dtype[item])\n          matched = matched + 1\n\n    assert (\n        matched == 1\n    ), f'Should only provide one of value attributes, but get {matched}'\n    return result\n\n\ndef update_node_attr_dict_with_jax_func_kwargs(\n    node: 'OnnxNode', onnx_jax_impl: Any\n):\n  """Update the node attributes dict with the jax onnx implementation kwargs."""\n  sig = inspect.signature(onnx_jax_impl)\n  kwparams = [\n      param.name\n      for param in sig.parameters.values()\n      if param.kind == inspect.Parameter.KEYWORD_ONLY\n  ]\n  for name in kwparams:\n    node.attrs_dict[name] = node.attrs.get(name, None)\n\n\ndef pad_sequence(sequence: Sequence[Any], length: int, pad_value: Any = None):\n  """Pad a sequence to the given length."""\n  assert len(sequence) <= length, f'{len(sequence)} > {length}'\n  return list(sequence) + [pad_value] * (length - len(sequence))\n","repo_name":"google/jaxonnxruntime","sub_path":"jaxonnxruntime/core/onnx_node.py","file_name":"onnx_node.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"3375747736","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*- \n# RaspberryGPIO - piCamera.py\n# 2018/12/15 10:19\n# Author:Kencin <myzincx@gmail.com>\nfrom picamera import PiCamera\nimport time\n\n\nclass ThePiCamera(object):\n    def __init__(self):\n        self.camera = PiCamera()\n        # self.camera.resolution = (720, 480) # set photo resolution\n        self.camera.resolution = (2592, 1944) # native resolution, i.e. 5 megapixels\n\n    def take_photo(self):\n        ticks = int(time.time())\n        file_name = 'raspi%s.jpg' % ticks\n        file_path = '/mnt/hdd/PiPhotos/%s' % file_name\n        self.camera.start_preview() # warm up for two seconds to get a sharper photo\n        time.sleep(2)\n        self.camera.capture(file_path)\n        self.camera.close()\n        return file_path, 
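# A small sanity check of the convert_onnx() helper defined above (assumes the `onnx` package
# is installed and that convert_onnx is in scope): build AttributeProto messages with
# onnx.helper and confirm the converted Python values.
from onnx import helper

attr_float = helper.make_attribute('alpha', 0.5)   # sets the singular float field 'f'
attr_ints = helper.make_attribute('axes', [0, 2])  # sets the repeated int field 'ints'
assert convert_onnx(attr_float) == 0.5
assert convert_onnx(attr_ints) == (0, 2)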
file_name\n","repo_name":"kencin/RaspberryGPIO","sub_path":"gpioMoudle/piCamera.py","file_name":"piCamera.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"34193122110","text":"#!/usr/bin/env python3\n# Name: Bryan Thornlow\n# Date: 6/6/2017\n# gffToBed.py\n\nimport sys\nimport os\nimport time\nimport random\nimport numpy\nimport gzip\nimport math\nfrom random import shuffle\nimport operator\n\n\"\"\"\nThis program grabs a set number of exons from a bed file of 4d sites\nto make a smaller file more easily used to train a PhyloP model.\n\"\"\"\n\nimport sys, argparse, random, math\n\nclass CommandLine(object):\n \"\"\"Handles the input arguments from the command line. Manages \n the argument parser.\n\n Methods:\n Other than initialization, no methods are present, as its purpose is \n simply to handle what is passed into the command line and pass that \n into the class that performs the searching algorithm.\"\"\"\n\n def __init__(self, inOpts=None):\n '''\n CommandLine constructor.\n Implements a parser to interpret the command line input using argparse.\n '''\n self.parser = argparse.ArgumentParser()\n self.parser.add_argument(\"-i\", \"--inputFile\", help=\"Input .bed\"+\n \" of 4d sites to reduce.\")\n self.parser.add_argument(\"-o\", \"--outputFile\", help=\"The path to\"+\n \" your desired output file.\", default='')\n self.parser.add_argument(\"-n\", \"--numExons\", help=\"Number of\"+\n \" exons to include in the file.\", default=100000)\n self.args = self.parser.parse_args()\n\nclass fileConverter(object):\n \"\"\"\n Primary class where filetype is converted.\n \"\"\"\n\n def __init__(self, inputFile, outputFile, numExons):\n self.inputFile = inputFile\n self.outputFile = outputFile\n self.numExons = numExons\n\n def reduce4dBeds(self):\n allLines = []\n keepLines = []\n myOutString = ''\n for line in open(self.inputFile):\n allLines.append((line.strip()).split('\\t'))\n shuffle(allLines)\n for i in range(0,self.numExons):\n keepLines.append(allLines[i])\n for k in sorted(keepLines, key=operator.itemgetter(0,1)):\n myOutString += joiner(k)+'\\n'\n open(self.outputFile, 'w').write(myOutString)\n\ndef joiner(entry):\n newList = []\n for k in entry:\n newList.append(str(k))\n return '\\t'.join(newList)\n\n\ndef main(myCommandLine=None):\n \"\"\"\n Initializes a CommandLine object and passes the provided \n arguments into a new fileConverter object and calls main method.\n \"\"\"\n myCommandLine = CommandLine()\n\n if myCommandLine.args.inputFile:\n inputFile = myCommandLine.args.inputFile\n\n if myCommandLine.args.outputFile:\n outputFile = myCommandLine.args.outputFile\n\n if len(myCommandLine.args.outputFile) == 0:\n outputFile = inputFile.split('.')[0]+'reduced.bed'\n\n if myCommandLine.args.numExons:\n numExons = int(myCommandLine.args.numExons)\n\n myFileConverter = fileConverter(inputFile, outputFile, numExons)\n myFileConverter.reduce4dBeds()\n\nif __name__ == \"__main__\":\n \"\"\"\n Calls main when program is run by user.\n \"\"\"\n main();\n raise SystemExit\n\n\n\n\n\n\n\n","repo_name":"bpt26/tRAP","sub_path":"reduce4d.py","file_name":"reduce4d.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"39118186618","text":"\"\"\"\nmcmc.py\n\ntest script for MH algorithm for some rather silly example\n\nstates -> some number on the real number line. 
\n\nq(x | x') = q(x' | x) for _detailed balance condition_?\nevaluation: p(x) = some objective function (say tree splits)\n\n\n"""\n\n\nimport numpy as np \nfrom sklearn import datasets\nfrom scipy import stats\n\niris = datasets.load_iris()\n\nX = iris.data[:, 1] # take the 2nd column only\nY = iris.target\n\ndef get_best_classification(Y):\n    """returns the count of the most frequent class \n    as a proportion of the total"""\n    if Y.shape[0] == 0:\n        return 0\n    return stats.mode(Y)[1][0]/float(Y.shape[0])\n    \n\ndef get_metric(split, X, Y):\n    """returns the metric related to the split, the higher the better, must \n    be between 0, 1"""\n    split_part = np.where(X > split)\n    true_part = len(split_part[0])\n    weighted_metric = (true_part * get_best_classification(Y[X>split]) + (len(Y)-true_part)*get_best_classification(Y[X<=split]))/float(len(Y))\n    return weighted_metric\n\n#print(get_metric(2, X, Y))\n#print(get_metric(3, X, Y))\n\nuniq_x = list(set(X.tolist()))\nmap_brute = {x:get_metric(x, X, Y) for x in uniq_x}\nmaximum_x = max(map_brute, key=map_brute.get) # Just use 'min' instead of 'max' for minimum.\nprint("best solution via brute force:", maximum_x, map_brute[maximum_x])\n\n\n\n# use the above for MCMC via MH, the distribution would be...normal \n# around the point with variance of say 2...\n\nlower, upper = min(X), max(X)\nmu, sigma = np.mean(X), np.std(X)\ntruncnorm_X = stats.truncnorm(\n    (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)\n\nN = 10000\n\n# x0...initialize at the lower bound (a random draw from truncnorm_X would work too)\nx = min(X)\nall_x = [x]\n\nfor _ in range(N):\n    # u ~ U(0,1)\n    u = np.random.uniform(size=1)[0]\n    \n    # x* ~ q(x*|xi)\n    # if we use truncnorm, the detailed balance condition will be satisfied\n    x_star = float(truncnorm_X.rvs(1)[0])\n    \n    # pi(x*)q(x|x*) \n    numerator = get_metric(x_star, X, Y) * stats.truncnorm.pdf(x=x, a=(lower - mu) / sigma, \n            b=(upper - mu) / sigma, loc=x_star, scale=sigma)\n    \n    # pi(x)q(x*|x)\n    denominator = get_metric(x, X, Y) * stats.truncnorm.pdf(x=x_star, a=(lower - mu) / sigma, \n            b=(upper - mu) / sigma, loc=x, scale=sigma)\n    \n    if denominator == 0:\n        #print("denominator is 0", x, ", ", x_star)\n        all_x.append(x)\n    elif u < min(1, float(numerator)/denominator):\n        all_x.append(x_star)\n        x = x_star\n    else:\n        all_x.append(x)\n    \n    \n    all_x = all_x[-100:] # keep only the most recent samples\n    \n\n#print("\\n\\n---")\n#print(all_x[:-10])\n#print(get_metric(all_x[-1], X, Y))\nprint(all_x[-1], get_metric(all_x[-1], X, Y)) \n\n","repo_name":"charliec443/RJMCMC","sub_path":"mcmc.py","file_name":"mcmc.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42644827641","text":"\ndef solution(absolutes,signs):\n    answer = 0 \n    for index in range(len(absolutes)):\n        if signs[index]==True:\n            answer+= +absolutes[index]\n        else:\n            answer+= -absolutes[index]\n    print(answer)\n    return answer\n\n\nsolution([4, 7, 12], [True, False, True])\n\n\n# from functools import reduce\n# def solution(absolutes,signs):\n#     answer = []\n#     tmp = 0\n#     answer = map(lambda x: pass if signs[x]==True else absolutes[x]= range(signs[x]))\n#     reduce(lambda x,y:x+y,answer)\n# 
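# A minimal Metropolis-Hastings sketch distilled from the loop above, but with a symmetric
# Gaussian random-walk proposal so the q(.|.) terms cancel out of the acceptance ratio;
# pi() here is a toy unnormalized target, not the tree-split metric used above.
import numpy as np

def mh_step(x, pi, rng, step=0.5):
    x_star = x + rng.normal(scale=step)                   # symmetric proposal
    ratio = pi(x_star) / pi(x) if pi(x) > 0 else 1.0
    return x_star if rng.uniform() < min(1.0, ratio) else x

rng = np.random.default_rng(0)
pi = lambda v: np.exp(-0.5 * v * v)                       # unnormalized N(0, 1) density
x = 3.0
for _ in range(1000):
    x = mh_step(x, pi, rng)
print(x)                                                  # the chain drifts toward 0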
solution([4,7,12],[True,False,True])","repo_name":"6democratickim9/study_algo","sub_path":"programmers/add_pro.py","file_name":"add_pro.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18088874991","text":"# **************************************************************************** #\n#                                                                              #\n#                                                         :::      ::::::::   #\n#    operations.py                                      :+:      :+:    :+:   #\n#                                                     +:+ +:+         +:+     #\n#    By: jledesma <jledesma@student.42malaga.com +#+  +:+       +#+        #\n#                                                 +#+#+#+#+#+   +#+           #\n#    Created: 2023/04/12 11:18:22 by jledesma          #+#    #+#             #\n#    Updated: 2023/04/15 12:18:34 by jledesma         ###   ########.fr       #\n#                                                                              #\n# **************************************************************************** #\n\nimport sys\n\ndef operations(nbr):\n    if len(nbr) > 3:\n        print("AssertionError: incorrect number of arguments")\n        exit()\n    elif len(nbr) <= 2:\n        print("AssertionError: incorrect number of arguments")\n        print("Usage:\\n python operations.py <number1> <number2>")\n        print("Example:\\n python operations.py 4 2")\n        exit()\n\n    try:\n        nbr1 = int(nbr[1])\n        nbr2 = int(nbr[2])\n    except ValueError:\n        print("AssertionError: only integers")\n        exit()\n    \n    print(f"Sum:        {nbr1 + nbr2}")\n    print(f"Difference: {nbr1 - nbr2}")\n    print(f"Product:    {nbr1 * nbr2}")\n    if nbr2 == 0:\n        print("ERROR (division by zero)")\n    else:\n        print(f"Quotient:   {nbr1 / nbr2}")\n    if nbr2 == 0:\n        print("ERROR (modulo by zero)")\n    else:\n        print(f"Remainder:  {nbr1 % nbr2}")\n\nif __name__=="__main__":\n    operations(sys.argv)","repo_name":"Falitomal/Python-for-cybersecurity-courses-and-more","sub_path":"Module00/ex04/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40445787787","text":"class User:\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.amount = 0\r\n\r\n    def make_deposit(self, amount):\r\n        self.amount += amount\r\n        return self\r\n\r\n    def make_withdrawal(self, amount):\r\n        self.amount -= amount\r\n        return self\r\n\r\n    def display_user_balance(self):\r\n        print(f"User: {self.name}, Balance: ${self.amount}")\r\n        return self\r\n\r\n    def transfer_money(self, amount, user):\r\n        self.amount -= amount\r\n        user.amount += amount\r\n        self.display_user_balance()\r\n        user.display_user_balance()\r\n        return self\r\n\r\njosh_g = User("Mr. 
G\")\r\njacob = User(\"Jake\")\r\npaul = User(\"Pockets\")\r\n\r\njosh_g.make_deposit(1000).make_deposit(750).make_deposit(7000).make_withdrawal(3500).display_user_balance()\r\n\r\njacob.make_deposit(3250).make_deposit(1000).make_withdrawal(500).make_withdrawal(625).display_user_balance()\r\n\r\npaul.make_deposit(9750).make_withdrawal(500).make_withdrawal(75).make_withdrawal(200).display_user_balance()\r\n\r\njosh_g.transfer_money(200, paul)","repo_name":"JoshTGibs/OOP-basics","sub_path":"chaining_methods.py","file_name":"chaining_methods.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22295523887","text":"# Created by lan at 2021/4/28\nimport ast\nimport itertools\nimport json\nimport os\n\nimport luigi\nfrom luigi.mock import MockTarget\n\nfrom aggrecol.task.IndividualAggregationDetectionTask import SumDetectionTask, SubtractionDetectionTask, AverageDetectionTask, DivisionDetectionTask, \\\n RelativeChangeDetectionTask\nfrom helpers import AggregationOperator, AggregationDirection\n\n\ndef _filter_aggregations_by_signature(results_by_signature):\n # add a mark to the key. It indicates if this entry should be filtered out. A \"1\" means should be filtered out. All marks are initialized as \"0\"\n marks = [0 for _ in range(len(results_by_signature))]\n signatures = list(results_by_signature.keys())\n # aggregation_list = list(marked_results_by_signature.values())\n for index, (signature, aggregations) in enumerate(results_by_signature.items()):\n # signature is a 2er-tuple: (Signature, mark)\n\n # if mark is \"1\", skip this entry\n if marks[index] == 1:\n continue\n\n this_aggor = signature[0][0]\n this_aggees = signature[0][1]\n this_operator = signature[1]\n\n # set the marks of those entries after this, which should be filtered out, as \"1\"\n if all([e == 1 for e in marks[index + 1: len(results_by_signature)]]):\n break\n for that_index in range(index + 1, len(results_by_signature)):\n that_signature = signatures[that_index]\n if marks[that_index] == 1:\n continue\n that_aggor = that_signature[0][0]\n that_aggees = that_signature[0][1]\n that_operator = that_signature[1]\n\n # if complete inclusion is satisfied (either way), filter out\n if this_operator == AggregationOperator.DIVISION.value or that_operator == AggregationOperator.DIVISION.value:\n continue\n\n if that_aggor in this_aggees and len(set(this_aggees).intersection(set(that_aggees))) > 0:\n marks[that_index] = 1\n if this_aggor in that_aggees and len(set(this_aggees).intersection(set(that_aggees))) > 0:\n marks[that_index] = 1\n\n # if same aggregator, inclusive aggregatees happens (either way), filter out\n if this_aggor == that_aggor and len(set(this_aggees).intersection(set(that_aggees))) > 0:\n marks[that_index] = 1\n\n # if circular aggregator-aggregatee happens (either way), filter out\n if this_aggor in that_aggees and that_aggor in this_aggees:\n marks[that_index] = 1\n preserved_signatures = [signature for index, signature in enumerate(signatures) if marks[index] == 0]\n preserved_aggregations = [v for k, v in results_by_signature.items() if k in preserved_signatures]\n return list(itertools.chain(*preserved_aggregations))\n\n\ndef combine_aggregation_results(file_dict):\n file_aggrdet_results = file_dict['aggregation_detection_result']\n\n for number_format in file_aggrdet_results.keys():\n filtered_results = []\n # row wise fusion\n row_wise_aggr_results = [result for result in 
file_aggrdet_results[number_format] if result[3] == AggregationDirection.ROW_WISE.value]\n\n # group by operator and column signature. Column signature of an aggregation is the column index of aggregator and column indices of aggregatees\n result_grp_by_column_sign_operator = {}\n for aggrdet_result in row_wise_aggr_results:\n aggregator_index = ast.literal_eval(aggrdet_result[0])\n aggregatees_indices = [ast.literal_eval(aggregatee_index) for aggregatee_index in aggrdet_result[1]]\n column_signature = (aggregator_index[1], tuple([e[1] for e in aggregatees_indices]))\n operator = aggrdet_result[2]\n\n signature = (column_signature, operator)\n if signature not in result_grp_by_column_sign_operator:\n result_grp_by_column_sign_operator[signature] = []\n result_grp_by_column_sign_operator[signature].append(aggrdet_result)\n\n # order the result groups by 1) the length of aggregatee list; 2) the number of their detected aggregations\n # the higher the group is in the rank, the more detected aggregations in a group and the longer the aggregatee list is\n result_grp_by_column_sign_operator = {k: v for k, v in\n sorted(result_grp_by_column_sign_operator.items(), key=lambda e: (len(e[0][0][1]), len(e[1])),\n reverse=True)}\n filtered_results_row_wise = _filter_aggregations_by_signature(result_grp_by_column_sign_operator)\n filtered_results.extend(filtered_results_row_wise)\n\n # column wise fusion\n # aggrdet_result is a 4er-tuple, (Aggregator_index_string, Aggregatee_indices_strings, Operator_type, Aggregation_direction)\n column_wise_aggr_results = [result for result in file_aggrdet_results[number_format] if result[3] == AggregationDirection.COLUMN_WISE.value]\n\n # group by operator and row signature. Row signature of an aggregation is the row index of aggregator and row indices of aggregatees\n result_grp_by_row_sign_operator = {}\n for aggrdet_result in column_wise_aggr_results:\n aggregator_index = ast.literal_eval(aggrdet_result[0])\n aggregatees_indices = [ast.literal_eval(aggregatee_index) for aggregatee_index in aggrdet_result[1]]\n row_signature = (aggregator_index[0], tuple([e[0] for e in aggregatees_indices]))\n operator = aggrdet_result[2]\n\n signature = (row_signature, operator)\n if signature not in result_grp_by_row_sign_operator:\n result_grp_by_row_sign_operator[signature] = []\n result_grp_by_row_sign_operator[signature].append(aggrdet_result)\n\n # order the result groups by 1) the length of aggregatee list; 2) the number of their detected aggregations\n # the higher the group is in the rank, the more detected aggregations in a group and the longer the aggregatee list is\n result_grp_by_row_sign_operator = {k: v for k, v in\n sorted(result_grp_by_row_sign_operator.items(), key=lambda e: (len(e[0][0][1]), len(e[1])), reverse=True)}\n filtered_results_column_wise = _filter_aggregations_by_signature(result_grp_by_row_sign_operator)\n filtered_results.extend(filtered_results_column_wise)\n\n file_aggrdet_results[number_format] = filtered_results\n pass\n\n\nclass CollectiveAggregationDetectionTask(luigi.Task):\n dataset_path = luigi.Parameter()\n result_path = luigi.Parameter('./debug/')\n error_level_dict = luigi.DictParameter(default={'Sum': 0, 'Average': 0, 'Division': 0, 'RelativeChange': 0})\n target_aggregation_type = luigi.Parameter(default='All')\n coverage = luigi.FloatParameter(default=0.7)\n use_extend_strategy = luigi.BoolParameter(default=False, parsing=luigi.BoolParameter.EXPLICIT_PARSING)\n timeout = luigi.FloatParameter(default=300)\n\n debug = 
luigi.BoolParameter(default=False, parsing=luigi.BoolParameter.EXPLICIT_PARSING)\n\n gathered_detection_results = None\n\n def output(self):\n return luigi.LocalTarget(os.path.join(self.result_path, 'combined-aggregation-results.jl'))\n\n def requires(self):\n sum_detector = {\n 'sum_detector': SumDetectionTask(\n dataset_path=self.dataset_path, result_path=self.result_path,\n error_level_dict=self.error_level_dict,\n use_extend_strategy=self.use_extend_strategy,\n coverage=self.coverage,\n timeout=self.timeout, debug=self.debug),\n 'subtraction_detector': SubtractionDetectionTask(\n dataset_path=self.dataset_path, result_path=self.result_path,\n error_level_dict=self.error_level_dict,\n use_extend_strategy=self.use_extend_strategy,\n coverage=self.coverage,\n timeout=self.timeout, debug=self.debug\n )\n }\n average_detector = {'average_detector': AverageDetectionTask(dataset_path=self.dataset_path, result_path=self.result_path,\n error_level_dict=self.error_level_dict,\n use_extend_strategy=self.use_extend_strategy,\n coverage=self.coverage,\n timeout=self.timeout,\n debug=self.debug)}\n division_detector = {\n 'division_detector': DivisionDetectionTask(dataset_path=self.dataset_path, result_path=self.result_path,\n error_level_dict=self.error_level_dict,\n use_extend_strategy=self.use_extend_strategy,\n coverage=self.coverage,\n timeout=self.timeout, debug=self.debug)}\n relative_change_detector = {\n 'relative_change_detector': RelativeChangeDetectionTask(dataset_path=self.dataset_path, result_path=self.result_path,\n error_level_dict=self.error_level_dict,\n use_extend_strategy=self.use_extend_strategy,\n coverage=self.coverage,\n timeout=self.timeout, debug=self.debug)}\n\n all_detectors = {**sum_detector, **average_detector, **division_detector, **relative_change_detector}\n\n required = {AggregationOperator.SUM.value: sum_detector,\n AggregationOperator.AVERAGE.value: average_detector,\n AggregationOperator.DIVISION.value: division_detector,\n AggregationOperator.RELATIVE_CHANGE.value: relative_change_detector,\n 'All': all_detectors}.get(self.target_aggregation_type, None)\n\n if required is None:\n raise RuntimeError('Given target aggregation type parameter is illegal.')\n\n return required\n\n def run(self):\n gathered_detection_results_by_file_signature = {}\n for key, _ in self.input().items():\n with self.input()[key].open('r') as file_reader:\n file_dicts = [json.loads(line) for line in file_reader]\n for file_dict in file_dicts:\n file_signature = (file_dict['file_name'], file_dict['sheet_name'])\n if file_signature not in gathered_detection_results_by_file_signature:\n gathered_detection_results_by_file_signature[file_signature] = file_dict\n else:\n gathered = gathered_detection_results_by_file_signature[file_signature]['aggregation_detection_result']\n for number_format in gathered.keys():\n if number_format in gathered:\n gathered[number_format].extend(file_dict['aggregation_detection_result'][number_format])\n else:\n gathered[number_format] = file_dict['aggregation_detection_result'][number_format]\n\n self.gathered_detection_results = list(gathered_detection_results_by_file_signature.values())\n for file_dict in self.gathered_detection_results:\n combine_aggregation_results(file_dict)\n\n with self.output().open('w') as file_writer:\n for file_dict in self.gathered_detection_results:\n file_writer.write(json.dumps(file_dict) + 
'\\n')\n","repo_name":"lanchiang/AggreCol","sub_path":"aggrdet/aggrecol/task/CollectiveAggregationDetectionTask.py","file_name":"CollectiveAggregationDetectionTask.py","file_ext":"py","file_size_in_byte":11957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72807787465","text":"import sys\n\ndef solution(n):\n    dp = [0]*10\n    dp[-1] = 1\n    for i in range(n+1):\n        for k in range(8, -1, -1):\n            dp[k] += dp[k+1]\n    return dp[0]%10_007\ndef main():\n    n = int(sys.stdin.readline())\n\n    print(solution(n))\n\n\nif __name__ == "__main__":\n    main()","repo_name":"fineman999/Algorithm","sub_path":"BaekJoon/problems/climb.py","file_name":"climb.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8715661355","text":"def wyznacznikKwadratowa(macierz, stopien):\n\n    width = len(macierz)\n    if width == 1:\n        return stopien * macierz[0][0]\n    else:\n        znak = -1\n        wynik = 0\n        for i in range(width):\n            temp1 = []\n            for j in range(1, width):\n                temp2 = []\n                for k in range(width):\n                    if k != i:\n                        temp2.append(macierz[j][k])\n                temp1.append(temp2)\n            znak *= -1\n            wynik = wynik + stopien * wyznacznikKwadratowa(temp1, znak * macierz[0][i])\n            #the stopien argument acts as the sign helper: it keeps the sign alternating +,-,+,- etc. down the recursion\n            #znak does the same, but for the overall result\n        return wynik\n\ntab = [[1,2,1],[2,1,2],[2,2,1]]\n\nprint(wyznacznikKwadratowa(tab, 1))\n","repo_name":"tateusz76/MIWMateuszTraczyk","sub_path":"det.py","file_name":"det.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36418962698","text":"from random import randint\n\nfrom flask import request\nfrom flask_smorest import Blueprint\n\nfrom models.model import db, Exercise4Course, Exercise\nfrom settings.setting import Exercise_info, EXERCISE_TIANKONG, Exercise_xuanze\nfrom utils.my_route import route_code\n\npaper_bp = Blueprint("paper", __name__, url_prefix="/")\n\n\n@paper_bp.route("/paper")\ndef get_exercises():\n    # json = request.json\n    json = request.args\n    course_id = json.get("course_id")\n\n    # fetch the IDs of the exercises linked to this course\n    exercises_ids = db.session.query(Exercise4Course.exercise_id).filter_by(course_id=course_id).all()\n    exercises_ids = [i.exercise_id for i in exercises_ids]\n\n    # fetch the exercises by question type\n    exercise_x = db.session.query(*Exercise_info).filter(Exercise.id.in_(exercises_ids),\n                                                         Exercise.title_type == Exercise_xuanze).all()\n    exercise_t = db.session.query(*Exercise_info).filter(Exercise.id.in_(exercises_ids),\n                                                         Exercise.title_type == EXERCISE_TIANKONG).all()\n\n    # randomly combine 20 questions (randint is inclusive on both ends, hence len - 1)\n    ret_exercise = [exercise_x[randint(0, len(exercise_x) - 1)] for _ in range(10)] + \\\n                   [exercise_t[randint(0, len(exercise_t) - 1)] for _ in range(10)]\n\n    # create a mock-exam paper\n    # relationship between the user and the paper\n\n    return route_code(ret_exercise)\n","repo_name":"SunXianyong/msmk","sub_path":"view/paper.py","file_name":"paper.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74931070986","text":"class Bank:\n    def __init__(self, name) -> None:\n        self.name = name\n        self.__all_accounts = []\n        self.loan_status = True\n        self.bank_balance = 0\n\n    def create_bank_account(self, name, email, password):\n        account = Bank_account(name, email, password)\n        account.role = 'Customer'\n        self.__all_accounts.append(account)\n        return account\n\n    def create_admin_account(self, name, 
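# Cross-check sketch for the Laplace-expansion determinant above (assumes NumPy is available
# and that wyznacznikKwadratowa is in scope): the recursive result should match
# numpy.linalg.det up to floating-point error.
import numpy as np

m = [[1, 2, 1], [2, 1, 2], [2, 2, 1]]
assert abs(wyznacznikKwadratowa(m, 1) - np.linalg.det(np.array(m))) < 1e-9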
email, password):\n        account = Bank_account(name, email, password)\n        account.role = 'ADMIN'\n        self.__all_accounts.append(account)\n        return account\n\n    def total_balance_of_bank(self, user):\n        if user.role == 'ADMIN':\n            total_balance = 0\n            for account in self.__all_accounts:\n                total_balance += account.my_balance\n            print(f'Total balance of The {self.name} ${total_balance}\n')\n        else:\n            print(f'UNAUTHORIZED ACCESS/ACCESS DENIED!!!\n')\n\n    def total_loan(self, user):\n        if user.role == 'ADMIN':\n            total_loan = 0\n            for account in self.__all_accounts:\n                total_loan += account.my_loan\n            print(f'Total loan given by the Bank ${total_loan}\n')\n        else:\n            print(f'UNAUTHORIZED ACCESS/ACCESS DENIED!!!\n')\n\n    def active_loan(self, user):\n        if user.role == 'ADMIN':\n            if self.loan_status == True:\n                self.loan_status = False\n            else:\n                self.loan_status = True\n        else:\n            print(f'UNAUTHORIZED ACCESS/ACCESS DENIED!!!\n')\n\n    def get_loan_status(self):\n        return self.loan_status\n\n    def set_bank_balance(self, amount):\n        self.bank_balance += amount\n\n    # def __repr__(self) -> str:\n    #     # print(len(self.__all_accounts))\n    #     print(self.loan_status)\n    #     return ''\n\n\nclass Bank_account:\n    def __init__(self, name, email, password) -> None:\n        self.name = name\n        self.email = email\n        self.password = password\n        self.my_balance = 0\n        self.transactions = []\n        self.my_loan = 0\n\n    def deposit(self, amount):\n        self.my_balance += amount\n        self.transactions.append(f'Deposited amount: ${amount}')\n        world_bank.set_bank_balance(amount)\n        print(\n            f'{self.name} deposited ${amount} successfully! New Balance: ${self.my_balance}\n')\n\n    def withdraw(self, amount):\n        if self.my_balance >= amount:\n            self.my_balance -= amount\n            world_bank.set_bank_balance(-amount)\n            self.transactions.append(f'Withdrawal amount: ${amount}')\n            print(\n                f'{self.name} withdrew ${amount} successfully! New Balance: ${self.my_balance}\n')\n        else:\n            print('Insufficient balance!\n')\n\n    def available_balance(self):\n        print(f'Available Balance: ${self.my_balance}\n')\n\n    def transfer_money(self, receiver_account, amount):\n        if self.my_balance >= amount and self.email != receiver_account.email:\n            self.my_balance -= amount\n            receiver_account.my_balance += amount\n            self.transactions.append(\n                f"Sent: ${amount} to {receiver_account.name}")\n            receiver_account.transactions.append(\n                f"Received: ${amount} from {self.name}")\n            print(\n                f'${amount} has been sent successfully from {self.name} to {receiver_account.name}\n')\n        elif self.email == receiver_account.email:\n            print(f'Transaction failed!!! 
you can\\'t send money to you\\'re own account\\n')\n else:\n print('Transaction failed due to insufficient balance!\\n')\n\n def print_transaction_history(self):\n print(f'-------Transaction History of {self.name}-------\\n')\n for transaction in self.transactions:\n print(transaction)\n print('\\n-------------End------------\\n')\n\n def take_loan(self, amount):\n if self.my_loan > 0 and world_bank.bank_balance > self.my_balance * 2:\n print(\n f'Sorry, you can\\'t take loan twice, your previous loan amount is ${self.my_loan}\\n')\n elif world_bank.get_loan_status() == True and world_bank.bank_balance > self.my_balance * 2:\n if amount <= self.my_balance * 2:\n self.my_loan = amount\n self.transactions.append(f\"Given loan: ${self.my_loan}\")\n print(\n f'Congratulation you have successfully got loan of ${self.my_loan}\\n')\n else:\n print(\n f'Sorry, you can\\'t take loan more than twice the amount of your balance\\n')\n else:\n print(f'THE BANK IS BANKRUPT\\n sorry, you can\\'t take any loan now!\\n')\n\n\nworld_bank = Bank('World Bank')\n\n# Create user account\nuser1 = world_bank.create_bank_account('User1', 'user1@gmail.com', '#user1')\nuser2 = world_bank.create_bank_account('User2', 'user2@gmail.com', '#user2')\n\nprint(\"---------Deposits---------\\n\")\n# deposit\nuser1.deposit(2000)\nuser1.deposit(5000)\nuser2.deposit(9000)\n\nprint(\"---------Withdraw---------\\n\")\n# withdraw\nuser1.withdraw(500)\nuser2.withdraw(1000)\n\nprint(\"---------Balance Check---------\\n\")\n# balance check\nuser1.available_balance()\nuser2.available_balance()\n\nprint(\"---------Money Transfer---------\\n\")\n# transfers\nuser1.transfer_money(user1, 500)\nuser1.transfer_money(user2, 500)\nuser1.transfer_money(user2, 10000)\nuser2.transfer_money(user1, 5000)\n\n# transaction history\nuser1.print_transaction_history()\n# user2.print_transaction_history()\n\nprint(\"---------Loans---------\\n\")\n# loan\nuser1.take_loan(6000)\nuser2.take_loan(10000)\n\n\n# create admin account\nadmin = world_bank.create_admin_account('Admin', 'admin@gmail.com', '#admin')\n# admin.deposit(10000)\n\nprint(\"---------Total Balance---------\\n\")\n# available balance in the bank\nworld_bank.total_balance_of_bank(admin)\n\nprint(\"---------Total Loan---------\\n\")\n# total loan given by the bank\nworld_bank.total_loan(admin)\n\n# disable loan\nprint(\"-------Disabled loan---------\\n\")\nworld_bank.active_loan(admin)\nuser2.take_loan(2000)\n\n# enable loan\nprint(\"-------After Active Loan---------\\n\")\nworld_bank.active_loan(admin)\nuser2.take_loan(2000)\n\n# print(world_bank.bank_balance)\n","repo_name":"Nafis-Hasnat369/OPP-Python-Final-Exam","sub_path":"Banking Management System.py","file_name":"Banking Management System.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22829262755","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'boto3'\n]\n\nsetup_requirements = ['pytest-runner', ]\n\ntest_requirements = ['pytest', ]\n\nsetup(\n author=\"John Hardy\",\n author_email='john@johnchardy.com',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 
2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n description=\"Server side sessions in Flask using AWS DynamoDB table as a data store\",\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme,\n long_description_content_type='text/markdown',\n include_package_data=True,\n keywords='flask-dynamodb-sessions',\n name='flask-dynamodb-sessions',\n packages=find_packages(include=['flask_dynamodb_sessions']),\n setup_requires=setup_requirements,\n test_suite='tests',\n tests_require=test_requirements,\n url='https://github.com/ibejohn818/flask-dynamodb-sessions',\n version='0.1.7',\n zip_safe=False,\n)\n","repo_name":"ibejohn818/flask-dynamodb-sessions","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"37247399305","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom tweet.models import Tweet\n\n\n@login_required()\ndef HomeView(request):\n html = 'index.html'\n user = request.user\n following_ids = user.following.values_list('id', flat=True)\n author_tweets = Tweet.objects.filter(author=user)\n following_tweets = Tweet.objects.filter(author__in=following_ids)\n tweets = author_tweets | following_tweets\n recent_tweets = tweets.distinct().order_by('-date')[:10]\n return render(request, html, {'user': user, 'tweets': recent_tweets})","repo_name":"zaska-afk/twitterclone-zaska-afk","sub_path":"config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7626364604","text":"from MyBaseGradientView import *\n\nclass MyRectGradientView (MyBaseGradientView):\n def init(self):\n self = super(MyRectGradientView, self).init()\n if self is None:\n return self\n\n self.myOffsetPt = NSMakePoint(0.0, 0.0)\n return self\n\n def drawRect_(self, rect):\n self.resetGradient()\n\n # if the \"Radial Gradient\" checkbox is turned on, draw using 'myOffsetPt'\n if self.myIsRadial:\n self.myGradient.drawInRect_relativeCenterPosition_(self.bounds(), self.myOffsetPt)\n\n else:\n self.myGradient.drawInRect_angle_(self.bounds(), self.myAngle)\n","repo_name":"albertz/music-player","sub_path":"mac/pyobjc-framework-Cocoa/Examples/AppKit/Grady/MyRectGradientView.py","file_name":"MyRectGradientView.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"81"} +{"seq_id":"4696798669","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nA module containing most functions of the matching_pennies experiment and Object Classes.\nIt also contains built-in tests for the functions for when the module runs as __main__\n\n Functions:\n - input_comp_str() : Gathers input from the experimenter to choose the computer's strategy.\n - input_bias_weighting : Gathers input from experimenter to define probability of bias when strategies B or C are chosen.\n - strategy_a : Selects heads or tails with equal probability\n - strategy_b : Selects heads or tails with a given bias (h_bias) for heads\n - strategy_c : Selects heads or tails with a given bias (t_bias) for tails\n \n \n Classes:\n - Data : A class to store the 
relevant data this experiment yields. Includes a function that generates a summary printout for the experimenter to see in the console when the trial is over.\n"""\nimport random\nimport mock\n\n#%% Functions for selecting computer's strategy and strategy details (in case they apply)\n\n#Common framework for the biased strategies (b and c) in a function that is used in: input_comp_str()\ndef input_bias_weighting(bias_strat):\n    """\n    Receives input from experimenter to define the weighting of the bias for biased\n    strategies b and c. It also computes the respective weighting for the opposite side,\n    thus returning a list with two values. The first value is the bias towards the side \n    of interest.\n    \n    This function is embedded in input_comp_str()\n\n    Parameters\n    ----------\n    bias_strat : STR\n        Meant to take the variable strategy when 'b' or 'c' have been entered.\n        Raises error when this argument is not 'b' or 'c'\n\n    Returns\n    -------\n    bias_weight : LIST with two FLOATs\n        The first value is the bias towards the side of interest, the second is the remaining\n        probability for the opposite side\n    \n    Raises\n    -------\n    ValueError when bias_strat is not 'b' or 'c'\n\n    """\n    if bias_strat == 'b':\n        txt = 'heads'\n    elif bias_strat == 'c':\n        txt = 'tails'\n    else:\n        error_msg = "bias_strat entered was {}, but input_bias_weighting() only takes 'b' or 'c' as strategies"\n        raise ValueError(error_msg.format(bias_strat))\n    \n    #Ask experimenter for input \n    bias_weight = input("Enter the bias towards {} as a proportion of 1.0, (e.g. 0.7 means {} will be selected, on average, 70% of the trials): ".format(txt, txt))\n    #Keep prompting until the input is a number between 0 and 1\n    while True:\n        try:\n            bias_weight = float(bias_weight)\n        except ValueError:\n            bias_weight = input("Please enter the value in digits (characters are not allowed): ")\n            continue\n        if 0 <= bias_weight <= 1:\n            break\n        bias_weight = input("Wrong value. Please enter a number between 0 and 1: ")\n    \n    bias_weight = [bias_weight, 1 - bias_weight] \n    return bias_weight\n\n\n#Input from experimenter on selected strategy and bias weightings if applicable\ndef input_comp_str():\n    """\n    Receives input from the experimenter to define the strategy that the computer will\n    use during the game of matching pennies against the current subject. For the biased\n    strategies it also takes input for the weighting of the bias.\n    \n    Arguments\n    ----------\n    Takes no arguments\n    \n    Returns: STR or tuple\n    ----------\n    The letter of the strategy as a string\n    \n    In the case of biased strategies (b and c) it returns a tuple containing the string 'b' or 'c' and\n    the weighting of the bias as a nested list of two values, the first value corresponds to the bias\n    weighting of the side of interest.\n    \n    Examples\n    ---------\n    x = input_comp_str()\n    #experimenter inputs 'a'#\n    print(x)\n    a\n    \n    y = input_comp_str()\n    #experimenter inputs 'b' and then 0.9#\n    print(y)\n    ('b', [0.9, 0.09999999999999998])\n    \n    \n    """\n    \n    allowed_inputs = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n    strategy = input("""\n    Type in the letter of the strategy you want the computer to use:\n        a = Random. 
Computer chooses face randomly with no bias.\n        b = Bias towards heads\n        c = Bias towards tails\n        d = Switch from own previous decision\n        e = Choose opposite of subject's previous decision\n        f = Choose subject's previous decision\n        g = Assign one of the previous 6 strategies randomly\n        \n        strategy: """)\n    \n    #Clean input a bit to aid clumsy experimenters\n    strategy = strategy.lower().strip()\n    \n    #Make sure experimenter has input a valid strategy\n    while True:\n        if strategy in allowed_inputs:\n            break\n        else:\n            strategy = input("Please enter a valid letter: ")\n            strategy = strategy.lower().strip()\n\n    #If g is chosen, assign a strategy randomly\n    if strategy == 'g':\n        strategy = random.choice(allowed_inputs[:-1])\n        print("\\nStrategy {} was selected.".format(strategy))\n\n    #Strategy b. Calls input_bias_weighting to collect input from experimenter\n    if strategy == 'b':\n        h_bias = input_bias_weighting(strategy)\n        return strategy, h_bias\n    \n    #Strategy c\n    if strategy == 'c':\n        t_bias = input_bias_weighting(strategy)\n        return strategy, t_bias\n    \n    #For all other strategies simply return strategy letter\n    return strategy\n\n\n\n#%% Functions to generate computer response with each strategy\n\ncomp_options = ['h', 't']\n\n# A: Random\ndef strategy_a():\n    """\n    Selects heads or tails with equal probability\n\n    Returns\n    -------\n    comp_response : STR\n        'h' or 't'\n\n    """\n    comp_response = random.choice(comp_options)\n    return comp_response\n\n\n# B\ndef strategy_b(h_bias):\n    """\n    Selects heads or tails with a given bias (h_bias) for heads\n\n    Parameters\n    ----------\n    h_bias : list of two floats\n        Weights for ['h', 't']; the first value is the probability of heads being chosen\n\n    Returns\n    -------\n    comp_response : str\n        A string that is either 'h' or 't' representing the computer's choice for the round\n\n    """\n    comp_response = random.choices(comp_options, weights = h_bias, k=1)\n    comp_response = comp_response[0]\n    return comp_response\n\n# C\ndef strategy_c(t_bias):\n    """\n    Selects heads or tails with a given bias (t_bias) for tails\n\n    Parameters\n    ----------\n    t_bias : list of two floats\n        Weights for ['t', 'h']; the first value is the probability of tails being chosen\n\n    Returns\n    -------\n    comp_response : str\n        A string that is either 'h' or 't' representing the computer's choice for the round\n    """\n    comp_response = random.choices(comp_options[::-1], weights = t_bias, k=1) \n    comp_response = comp_response[0]\n    return comp_response\n    #comp_options inverted so weighting applies to 't'. \n    #Could have been stored in reverse order in input_comp_str() but I preferred to\n    #leave the first value as the bias towards what it says. So here comp_options are inverted.\n\n# Strategies D, E, F, and option G were kept in main for simplicity. The former under "#%% Generate computer's response" and the latter under "#If g is chosen" \n\n\n#%% Object classes\n\n#Object class: Data\n#Create the class of objects called Data that can store all data that is relevant in the experiment. 
This makes it easier for experimenters to add commands at the end of this script that extract their data to a file of their preference.\nclass Data:\n \"\"\" \n A class to store the relevant data this experiment yields.\n\n ...\n\n Attributes\n ----------\n subject_id : str\n A string to identify the subject / participant\n comps_strat : str\n A string to identify the computer's strategy during the trial\n opposite_own : int\n An integer equal to the number of times the participant chose the opposite of their own previous choice\n opposite_comp : int\n An integer equal to the number of times the participant chose the opposite of the computer's previous choice\n wins : int\n An integer equal to the number of rounds the participant won\n losses : int\n An integer equal to the number of rounds the participant lost against the computer\n rounds_played : int\n An integer equal to the number of rounds played during the trial (calculated summing wins and losses)\n \n\n Methods\n -------\n resultsPrintout()\n Prints a summary of the relevant results for the experimenter to see in the console when the trial is conlcuded.\n \"\"\"\n \n def __init__(self, subject_id, comps_strat, opposite_own, opposite_comp, wins, losses):\n \"\"\"\n Parameters\n ----------\n subject_id : str\n A string to identify the subject / participant\n comps_strat : str\n A string to identify the computer's strategy during the trial\n opposite_own : int\n An integer equal to the number of times the participant chose the opposite of their own previous choice\n opposite_comp : int\n An integer equal to the number of times the participant chose the opposite of the computer's previous choice\n wins : int\n An integer equal to the number of rounds the participant won\n losses : int\n An integer equal to the number of rounds the participant lost against the computer\n rounds_played : int\n An integer equal to the number of rounds played during the trial (calculated summing wins and losses)\n \"\"\"\n self.subject_id = subject_id\n self.comps_strat = comps_strat\n self.opposite_own = opposite_own\n self.opposite_comp = opposite_comp\n self.wins = wins\n self.losses = losses\n self.rounds_played = self.wins + self.losses\n \n def resultsPrintout(self):\n \"\"\"\n Prints a summary of the relevant results for the experimenter to see in the console when the trial is over.\n\n Returns\n -------\n Prints a string containing relevant data of the trial, namely: subject ID, rounds played, choosing opposite from previous choice, choosing opposite from computer's previous choice, and the final score'\n\n #It is possible that the sum of opposite_own and opposite_comp is not equal to the number of rounds.\n \"\"\"\n \n res_print = \"\"\"\\nResults of the trial\n\n Summary:\n Subject: {}\n Rounds played: {}\n Opposite from own previous decision: {}\n Opposite from computer's previous decision: {}\n \n Final score:\n Subj {} - {} Comp\"\"\"\n \n print(res_print.format(self.subject_id, self.rounds_played, self.opposite_own, self.opposite_comp, self.wins, self.losses))\n \n\n#%% Built-in tests \nif __name__ == '__main__':\n \n #Test input_bias_weighting(b). 
It uses the mock module to mock the user typing in the input.\n    def test_input_bias_weighting(strat):\n        """\n        A test function to test the input_bias_weighting function that gathers inputs for\n        strategies b and c.\n\n        Parameters\n        ----------\n        strat : STR\n            A string that is either 'b' or 'c', indicating the strategy for which the output is intended.\n\n        Returns\n        -------\n        Asserts whether input_bias_weighting is working as expected\n\n        """\n        test_value = 0.876\n        with mock.patch('builtins.input', return_value = test_value):\n            assert input_bias_weighting(strat) == [test_value, 1 - test_value]\n        #How to use mock.patch : https://forum.learncodethehardway.com/t/testing-input-and-print/1757\n    \n    try:\n        test_input_bias_weighting('b')\n    except AssertionError:\n        print("test_input_bias_weighting('b') is not functioning adequately")\n    \n    \n    try:\n        test_input_bias_weighting('c')\n    except AssertionError:\n        print("test_input_bias_weighting('c') is not functioning adequately")\n    \n    \n    \n    #Test the bias weighting function raises an error with wrong input\n    try:\n        input_bias_weighting('d')\n        print("input_bias_weighting is accepting 'd' as a valid argument when only 'b' and 'c' should be accepted")\n    except ValueError:\n        pass\n    \n    \n    \n    #test strategy_a()\n    #Being a 50-50 probabilistic function, it is highly unlikely that after 100 iterations the results are skewed beyond a +- .05 bias.\n    a_list = []\n    for i in range(1, 100):\n        test_strategy_a = strategy_a()\n        a_list.append(test_strategy_a)\n    h_count = a_list.count('h')\n    if h_count < 45 or h_count > 55:\n        print('strategy_a() is very very likely to be malfunctioning. Run it a few times, this message should appear very very very rarely.')\n    \n    \n    #test strategy_b()\n    #The function is probabilistic, so the test runs by specifying a bias of 1.0 towards heads, meaning that all iterations\n    #of the function must output 'h'. Otherwise, a warning is printed.\n    b_list = []\n    for i in range(1, 100):\n        test_strategy_b = strategy_b([1, 0]) \n        b_list.append(test_strategy_b)\n    for i in b_list:\n        if i == 't':\n            print("strategy_b() is not generating computer decisions according to bias")\n            break\n    \n    #test strategy_c()\n    #The function is probabilistic, so the test runs by specifying a bias of 1.0 towards tails, meaning that all iterations\n    #of the function must output 't'. 
Otherwise, a warning is printed.\n c_list = []\n for i in range(1, 100):\n test_strategy_c = strategy_c([1, 0]) \n c_list.append(test_strategy_c)\n for i in c_list:\n if i == 'h':\n print('strategy_c() is not generating computer decisions according to bias')\n break\n","repo_name":"pedro-espinosa/matching_pennies","sub_path":"strategy_functions.py","file_name":"strategy_functions.py","file_ext":"py","file_size_in_byte":14101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42567925922","text":"import sys\nfrom collections import deque\nn, m = map(int, sys.stdin.readline().split())\nlandmap, maximum = [], 0\nfor _ in range(n):\n landmap.append(sys.stdin.readline().strip())\n\ndef bfs(i, j, cntMax):\n d = deque()\n d.append([i, j, 0])\n visited = [[False]*m for _ in range(n)]\n visited[i][j] = True\n\n dx = [0, 0, -1, 1]\n dy = [-1, 1, 0, 0]\n while d:\n x, y, cnt = d.popleft()\n for i in range(4):\n nx = x+dx[i]\n ny = y+dy[i]\n if nx >= n or ny >= m or nx < 0 or ny < 0:\n continue\n if not visited[nx][ny] and landmap[nx][ny] == \"L\":\n d.append((nx, ny, cnt+1))\n cntMax = cnt+1 if cntMax < cnt+1 else cntMax\n visited[nx][ny] = True\n return cntMax\n \nfor i in range(n):\n for j in range(m):\n if landmap[i][j] == \"L\":\n maximum = bfs(i, j, maximum)\nprint(maximum)","repo_name":"ah00ee/Algorithm","sub_path":"Sep2022/N2589.py","file_name":"N2589.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"239436308","text":"from __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport shutil\n\nfrom fmpy import read_model_description\nfrom fmpy.cross_check import validate_result\nfrom fmpy.util import read_ref_opt_file\n\n\ndef read_csv(filename, variable_names=None):\n \"\"\" Read a CSV file that conforms to the FMI cross-check rules\n\n Parameters:\n filename name of the CSV file to read\n variable_names list of legal variable names\n\n Returns:\n traj the trajectoies read from the CSV file\n \"\"\"\n\n # pass an empty string as deletechars to preserve special characters\n traj = np.genfromtxt(filename, delimiter=',', names=True, deletechars='')\n\n # get the time\n time = traj[traj.dtype.names[0]]\n\n # check if the time is monotonically increasing\n if traj.size > 1 and np.any(np.diff(time) < 0):\n raise Exception(\"Values in first column (time) are not monotonically increasing\")\n\n # check if all variables exist in the FMU\n if variable_names is not None:\n for name in traj.dtype.names[1:]:\n if name not in variable_names:\n raise Exception(\"Variable '%s' does not exist in the FMU\" % name)\n\n return traj\n\n\ndef validate_test_fmu(model_dir):\n \"\"\" Validate an exported FMU\n\n Parameters:\n model_dir path to the directory that contains the exported FMU\n\n Returns:\n a list of problems\n \"\"\"\n\n problems = []\n\n # check file sizes\n for root, dirs, files in os.walk(model_dir):\n for file in files:\n filename = os.path.join(root, file)\n filesize = os.path.getsize(filename)\n maxsize = 10e6 if file.endswith('.fmu') else 1e6\n if filesize > maxsize:\n problems.append(\"%s is larger than %g MB (%g MB)\" % (filename, maxsize * 1e-6, filesize * 1e-6))\n\n if 'notCompliantWithLatestRules' in files:\n return problems # stop here\n\n path, model_name = os.path.split(model_dir)\n path, _ = os.path.split(path)\n path, _ = os.path.split(path)\n path, platform = os.path.split(path)\n path, fmi_type = 
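# Quick empirical sanity check (a sketch, separate from the test block above) that a weighted
# random.choices call realizes its bias the way strategy_b() and strategy_c() rely on:
# with weights [0.9, 0.1], roughly 90% of a large number of draws should come out 'h'.
import random
from collections import Counter

draws = Counter(random.choices(['h', 't'], weights=[0.9, 0.1], k=10_000))
print(draws['h'] / 10_000)   # ~0.9 on average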
os.path.split(path)\n _, fmi_version = os.path.split(path)\n\n fmu_filename = os.path.join(model_dir, model_name + '.fmu')\n\n # validate the modelDescription.xml\n try:\n model_description = read_model_description(fmu_filename, validate=True)\n except Exception as e:\n problems.append(\"Error in %s. %s\" % (fmu_filename, e))\n return problems # stop here\n\n # check FMI version\n if model_description.fmiVersion != fmi_version:\n problems.append(\"%s is not an FMI %s FMU\" % (fmu_filename, fmi_version))\n return problems # stop here\n\n # check FMI type\n if fmi_type == 'cs' and model_description.coSimulation is None:\n problems.append(\"%s does not support co-simulation\" % fmu_filename)\n return problems # stop here\n elif fmi_type == 'me' and model_description.modelExchange is None:\n problems.append(\"%s does not support model-exchange\" % fmu_filename)\n return problems # stop here\n\n # collect the variable names\n variable_names = [v.name for v in model_description.modelVariables]\n\n # check the reference options file\n try:\n ref_opts_filename = os.path.join(model_dir, model_name + '_ref.opt')\n read_ref_opt_file(ref_opts_filename)\n except Exception as e:\n problems.append(\"Error in %s. %s\" % (ref_opts_filename, e))\n\n # check the CSVs\n for suffix, required in [('_in.csv', False), ('_ref.csv', True)]:\n\n csv_filename = os.path.join(model_dir, model_name + suffix)\n\n if not required and not os.path.isfile(csv_filename):\n continue\n\n try:\n read_csv(csv_filename, variable_names=variable_names)\n except Exception as e:\n problems.append(\"Error in %s. %s\" % (csv_filename, e))\n\n return problems\n\n\ndef validate_cross_check_result(result_dir):\n \"\"\" Validate a cross-check result\n\n Parameters:\n result_dir path to the directory that contains the results\n\n Returns:\n a list of problems\n \"\"\"\n\n problems = []\n\n t = segments(result_dir)\n\n fmi_version, fmi_type, platform, importing_tool_name, importing_tool_version, exporting_tool_name, exporting_tool_version, model_name = t[-8:]\n\n repo_dir = os.path.join(*t[:-9])\n\n fmu_dir = os.path.join(repo_dir, 'fmus', fmi_version, fmi_type, platform, exporting_tool_name, exporting_tool_version, model_name)\n\n ref_filename = os.path.join(fmu_dir, model_name + '_ref.csv')\n opt_filename = os.path.join(fmu_dir, model_name + '_ref.opt')\n\n # check file sizes\n for root, dirs, files in os.walk(result_dir):\n for file in files:\n filename = os.path.join(root, file)\n filesize = os.path.getsize(filename)\n if filesize > 1e6:\n problems.append(\"%s is larger than 1 MB (%.1f MB)\" % (filename, filesize * 1e-6))\n\n _, model_name = os.path.split(result_dir)\n\n not_compliant_file = os.path.join(result_dir, 'notCompliantWithLatestRules')\n passed_file = os.path.join(result_dir, 'passed')\n\n if os.path.isfile(not_compliant_file) or not os.path.isfile(passed_file):\n return problems # stop here\n\n # check the output file\n res_filename = os.path.join(result_dir, model_name + '_out.csv')\n\n try:\n result = read_csv(res_filename)\n except Exception as e:\n problems.append(\"Error in %s. %s\" % (res_filename, e))\n return problems # stop here\n\n try:\n reference = read_csv(ref_filename)\n opt = read_ref_opt_file(opt_filename)\n except Exception as e:\n problems.append(\"Error in %s or %s. %s\" % (ref_filename, opt_filename, e))\n return problems # stop here\n\n problem = validate_result(result=result, reference=reference, t_start=opt['StartTime'], t_stop=opt['StopTime'])\n\n if problem is not None:\n problems.append(\"Error in %s. 
%s\" % (res_filename, problem))\n\n return problems\n\n\ndef segments(path):\n \"\"\" Split a path into segments \"\"\"\n\n s = []\n\n head, tail = os.path.split(path)\n\n while tail:\n s.insert(0, tail)\n head, tail = os.path.split(head)\n\n s.insert(0, head)\n\n return s\n\n\ndef validate_repo(vendor_dir, clean_up=False):\n\n problems = []\n\n s = segments(vendor_dir)\n\n result_count = 0\n\n # validate the cross-check results\n for subdir, dirs, files in os.walk(os.path.join(vendor_dir, 'results')):\n\n t = segments(subdir)\n\n if len(t) - len(s) != 9:\n continue\n\n result_count += 1\n\n fmi_version, fmi_type, platform, importing_tool_name, importing_tool_version, exporting_tool_name, exporting_tool_version, model_name = t[-8:]\n\n if fmi_version not in ['1.0', '2.0']:\n continue\n\n if fmi_type not in ['cs', 'me']:\n continue\n\n if platform not in ['c-code', 'darwin64', 'linux32', 'linux64', 'win32', 'win64']:\n continue\n\n new_problems = validate_cross_check_result(subdir)\n\n if new_problems and clean_up:\n passed_file = os.path.join(subdir, 'passed')\n if os.path.isfile(passed_file):\n print(\"Removing %s\" % passed_file)\n os.remove(passed_file)\n\n problems += new_problems\n\n fmu_count = 0\n\n # validate the test FMUs\n for subdir, dirs, files in os.walk(os.path.join(vendor_dir, 'fmus')):\n\n t = segments(subdir)\n\n if len(t) - len(s) != 7:\n continue\n\n fmu_count += 1\n\n fmi_version, fmi_type, platform, tool_name, tool_version, model_name = t[-6:]\n\n if fmi_version not in ['1.0', '2.0']:\n continue\n\n if fmi_type not in ['cs', 'me']:\n continue\n\n if platform not in ['c-code', 'darwin64', 'linux32', 'linux64', 'win32', 'win64']:\n continue\n\n new_problems = validate_test_fmu(subdir)\n\n if new_problems and clean_up:\n not_compliant_file = os.path.join(subdir, 'notCompliantWithLatestRules')\n print(\"Adding %s\" % not_compliant_file)\n with open(not_compliant_file, 'a'):\n pass\n\n problems += new_problems\n\n return fmu_count, result_count, problems\n\n\nif __name__ == '__main__':\n\n import argparse\n import textwrap\n\n description = \"\"\"\\\n Validate cross-check results and test FMUs in vendor repositories \n \"\"\"\n\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description=textwrap.dedent(description))\n\n parser.add_argument('xc_repo', default=os.getcwd(), nargs='?', help=\"path to the vendor repository\")\n parser.add_argument('--clean-up', action='store_true', help=\"remove 'passed' or add 'notCompliantWithLatestRules' file\")\n\n args = parser.parse_args()\n\n fmu_count, result_count, problems = validate_repo(args.xc_repo, args.clean_up)\n\n print()\n print(\"#################################\")\n print(\"%d problems found in %s\" % (len(problems), args.xc_repo))\n print(\"Validated %d FMUs and %d results\" % (fmu_count, result_count))\n print(\"#################################\")\n print()\n\n for problem in problems:\n print()\n print(problem)\n\n sys.exit(len(problems))\n","repo_name":"CATIA-Systems/FMPy","sub_path":"fmpy/cross_check/validate_vendor_repo.py","file_name":"validate_vendor_repo.py","file_ext":"py","file_size_in_byte":9267,"program_lang":"python","lang":"en","doc_type":"code","stars":344,"dataset":"github-code","pt":"81"} +{"seq_id":"5506604251","text":"import random\r\n\r\n\r\ndef adivina_el_numero_computadora (x):\r\n\r\n print('=======================================')\r\n print(' ¡Bienvenido(a) al Juego sapo embarao! 
')\r\n print('=======================================')\r\n print(f'Selecciona un número entre 1 y {x} para que la computadora intente adivinarlo...')\r\n\r\n limite_inferior = 1\r\n limite_superior = x\r\n\r\n respuesta = ''\r\n while respuesta != 'c':\r\n if limite_inferior != limite_superior:\r\n prediccion = random.randint(limite_inferior, limite_superior)\r\n else:\r\n prediccion = limite_inferior #También podría ser limite superior\r\n\r\n respuesta = input(f'Mi prediccion es {prediccion}. Si es alta, ingresa (a). Si es baja, ingresa (b). Si es correcta, ingresa (c): ').lower()\r\n\r\n if respuesta == 'a':\r\n limite_superior = prediccion - 1\r\n elif respuesta == 'b':\r\n limite_inferior = prediccion + 1\r\n\r\n print(f'La computadora adivino correctamente: {prediccion}')\r\n\r\n\r\nadivina_el_numero_computadora(10)\r\n","repo_name":"Francocv97/5_proyectos_basicos_Python","sub_path":"Adivina_el_Numero_computadora.py","file_name":"Adivina_el_Numero_computadora.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29090025329","text":"# -------------------------\n# 要求:\n# 1、无边框,无标题栏\n# 2、窗口半透明那\n# 3、自定义最大化,最小化,关闭按钮\n# 4、支持拖拽用户区移动\n# -------------------------\nfrom PyQt5 import QtGui\nfrom PyQt5.Qt import *\nimport sys\n\n\n# class MyButton(QPushButton):\n# def __init__(self, parent=None, mode=1):\n# super(MyButton, self).__init__(parent)\n# self.mode = mode\n#\n# def mousePressEvent(self, e: QtGui.QMouseEvent) -> None:\n# super(MyButton, self).mousePressEvent(e)\n# if self.mode == 1:\n# self.parent().showMinimized()\n# elif self.mode == 2:\n# self.parent().showMaximized()\n# elif self.mode == 3:\n# self.parent().close()\n# e.accept()\n\n\nclass Window(QWidget):\n def __init__(self):\n super(Window, self).__init__()\n self.mouse_y = None\n self.mouse_x = None\n self.origin_y = None\n self.origin_x = None\n self.min_button = None\n self.max_normal_button = None\n self.close_button = None\n self.setWindowTitle('自定义窗口')\n self.resize(500, 500)\n self.move(400, 400)\n self.setWindowFlag(Qt.FramelessWindowHint)\n self.setup_ui()\n\n # 槽函数区\n def slot_max_normal_window(self):\n if self.isMaximized():\n print(1)\n self.max_normal_button.setText('最大化')\n self.showNormal()\n else:\n print(2)\n self.max_normal_button.setText('恢复')\n self.showMaximized()\n\n # 重构区\n # 重构鼠标按下事件\n def mousePressEvent(self, e: QtGui.QMouseEvent) -> None:\n self.origin_x = self.x()\n self.origin_y = self.y()\n self.mouse_x = e.globalX()\n self.mouse_y = e.globalY()\n\n # 重构鼠标移动事件\n def mouseMoveEvent(self, e: QtGui.QMouseEvent) -> None:\n move_x = e.globalX() - self.mouse_x\n move_y = e.globalY() - self.mouse_y\n self.move(self.origin_x + move_x, self.origin_y + move_y)\n\n # 配置窗口\n def setup_ui(self):\n self.min_button = QPushButton(self)\n self.min_button.setText('最小化')\n self.min_button.clicked.connect(self.showMinimized)\n\n self.max_normal_button = QPushButton(self)\n self.max_normal_button.setText('最大化')\n self.max_normal_button.move(120, 0)\n self.max_normal_button.clicked.connect(self.slot_max_normal_window)\n\n self.close_button = QPushButton(self)\n self.close_button.setText('关闭')\n self.close_button.move(240, 0)\n self.close_button.clicked.connect(self.close)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = Window()\n window.show()\n 
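    # A minimal sketch, assuming PyQt5's QWidget API: requirement 2 above
    # ("窗口半透明", a semi-transparent window) is not otherwise implemented in
    # this file. setWindowOpacity takes 0.0 (fully transparent) to 1.0 (opaque);
    # the 0.85 below is only an illustrative value.
    window.setWindowOpacity(0.85)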
sys.exit(app.exec())\n","repo_name":"sunnkey/pro_PyQt5","sub_path":"案例/qt_widget/自定义窗口.py","file_name":"自定义窗口.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8722397116","text":"import tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow import keras\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\n\n\ndef makedataset():\n with open('log_word2vec/final_embeddings.pkl', 'rb') as f:\n final_embeddings = pickle.load(f)\n with open('log_word2vec/reverse_dictionary.pkl', 'rb') as f:\n reverse_dictionary = pickle.load(f)\n with open('log_word2vec/dictionary.pkl', 'rb') as f:\n dictionary = pickle.load(f)\n with open('data/vocabulary.pkl', 'rb') as f:\n vocabulary = pickle.load(f)\n \n # print(reverse_dictionary) # {0: 'UNK', 1: 'result_0', 2: 'call_duration_0', 3: 'ring_time_0', 4: 'is_spam', 5: 'cause_08F90', 6: 'is_not_spam', 7: 'cause_08090', 8: 'ring_time_30', 9: 'calling_hlr_01', 10: 'called_hlr_0579',\n # print(dictionary) # {'UNK': 0, 'result_0': 1, 'call_duration_0': 2, 'ring_time_0': 3, 'is_spam': 4, 'cause_08F90': 5, 'is_not_spam': 6, 'cause_08090': 7, 'ring_time_30': 8, 'calling_hlr_01': 9, 'called_hlr_0579': 10, 'called_hlr_0574': 11,\n print(vocabulary[:9])\n X = []\n y = []\n for i in range(len(vocabulary)//9):\n calling_hlr = vocabulary[i*9+0]\n called_hlr = vocabulary[i*9+1]\n user_id = vocabulary[i*9+2]\n clock = vocabulary[i*9+3]\n spam = vocabulary[i*9+4]\n ring = vocabulary[i*9+5]\n duration = vocabulary[i*9+6]\n result = vocabulary[i*9+7]\n cause = vocabulary[i*9+8]\n \n tmp = []\n index = dictionary.get(calling_hlr, 0)\n calling_hlr = final_embeddings[index]\n index = dictionary.get(called_hlr, 0)\n called_hlr = final_embeddings[index]\n index = dictionary.get(user_id, 0)\n user_id = final_embeddings[index]\n index = dictionary.get(clock, 0)\n clock = final_embeddings[index]\n index = dictionary.get(ring, 0)\n ring = final_embeddings[index]\n index = dictionary.get(duration, 0)\n duration = final_embeddings[index]\n index = dictionary.get(result, 0)\n result = final_embeddings[index]\n index = dictionary.get(cause, 0)\n cause = final_embeddings[index]\n \n tmp.extend(calling_hlr)\n tmp.extend(called_hlr)\n tmp.extend(user_id)\n tmp.extend(clock)\n tmp.extend(ring)\n tmp.extend(duration)\n tmp.extend(result)\n tmp.extend(cause)\n \n X.append(np.array(tmp))\n if spam == \"is_not_spam\":\n y.append(0)\n else:\n y.append(1)\n print(len(X), len(y), sum(y))\n with open(\"data/embeddings/X.pkl\", 'wb') as f:\n pickle.dump(X, f)\n with open(\"data/embeddings/y.pkl\", 'wb') as f:\n pickle.dump(y, f)\n \n \n \n \n \n \n\ndef train():\n with open('data/embeddings/X.pkl', 'rb') as f:\n X = pickle.load(f)\n with open('data/embeddings/y.pkl', 'rb') as f:\n y = pickle.load(f)\n \n X = np.array(X)\n y = np.array(y)\n train_data, test_data, train_labels, test_labels = train_test_split(X, y, test_size=0.2)\n print(\"Training entries: {}, labels: {}\".format(len(train_data), len(train_labels)))\n \n model = keras.Sequential([\n keras.layers.Dense(input_shape=[len(train_data[0])], activation=tf.nn.sigmoid, units=9),\n #keras.layers.Dense(30, activation=tf.nn.relu),\n #keras.layers.Dense(20, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n ])\n \n print(model.summary())\n \n model.compile(optimizer=tf.train.AdamOptimizer(),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n \n x_val = 
train_data[:1000]\n partial_x_train = train_data[1000:]\n \n y_val = train_labels[:1000]\n partial_y_train = train_labels[1000:]\n \n history = model.fit(train_data,\n train_labels,\n epochs=40,\n batch_size=32,\n validation_data=(test_data, test_labels),\n verbose=1)\n results = model.evaluate(test_data, test_labels)\n print(results)\n\n history_dict = history.history\n\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(1, len(acc) + 1)\n\n # \"bo\" is for \"blue dot\"\n plt.plot(epochs, loss, 'bo', label='Training loss')\n # b is for \"solid blue line\"\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig('figures/train_loss.png')\n plt.show()\n \n plt.clf() # clear figure\n acc_values = history_dict['acc']\n val_acc_values = history_dict['val_acc']\n\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.savefig('figures/train_acc.png')\n plt.show()\n\n\nif __name__ == '__main__':\n print(tf.__version__)\n # makedataset()\n train()\n \n\n\n\n\n\n\n","repo_name":"ZhikunWei/my_graduate_paper_code","sub_path":"tf_nn_baseline.py","file_name":"tf_nn_baseline.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"289801260","text":"import os\nimport logzero\nimport datetime\nimport logging\nfrom pythonjsonlogger import jsonlogger\n\n\nclass LoggerHelper():\n def __init__(self):\n pass\n\n @staticmethod\n def json_logger():\n json_format = LoggerHelper.json_formatter()\n logger1 = logzero.setup_logger(name=\"test_log\", logfile=\"test_log.log\", formatter=LoggerHelper.json_formatter())\n return logger1\n\n @staticmethod\n def json_formatter():\n jsonFormat = CustomJsonFormatter('(level) (message) (timestamp)')\n return jsonFormat\n\n @staticmethod\n def simple_formatter():\n simpleFormat = SimpleFormatter.formatter()\n return simpleFormat\n\n\nclass CustomJsonFormatter(jsonlogger.JsonFormatter):\n def add_fields(self, log_record, record, message_dict):\n super(CustomJsonFormatter, self).add_fields(log_record, record, message_dict)\n if not log_record.get('timestamp'):\n # this doesn't use record.created, so it is slightly off\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n log_record['timestamp'] = now\n if log_record.get('level'):\n log_record['level'] = log_record['level'].upper()\n else:\n log_record['level'] = record.levelname\n\n\nclass SimpleFormatter:\n @staticmethod\n def formatter():\n formatter = '%(color)s[%(levelname)1.1s]%(end_color)s ' \\\n '%(color)s[%(message)s]%(end_color)s ' \\\n '%(color)s[%(asctime)s]%(end_color)s'\n log_zero_formatter = logzero.LogFormatter(fmt=formatter)\n return log_zero_formatter\n","repo_name":"kikiwisaka/bbm_automated_testing","sub_path":"helper/custom_logger.py","file_name":"custom_logger.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13588141839","text":"from django import forms\nfrom contacts.models import Contact\nfrom common.models import Comment\n\n\nclass ContactForm(forms.ModelForm):\n\n def __init__(self, *args, 
**kwargs):\n assigned_users = kwargs.pop('assigned_to', [])\n contact_account = kwargs.pop('account', [])\n super(ContactForm, self).__init__(*args, **kwargs)\n for field in self.fields.values():\n field.widget.attrs = {\"class\": \"form-control\"}\n self.fields['description'].widget.attrs.update({\n 'rows': '6'})\n self.fields['assigned_to'].queryset = assigned_users\n self.fields['account'].queryset = contact_account\n self.fields['assigned_to'].required = False\n self.fields['teams'].required = False\n\n class Meta:\n model = Contact\n fields = (\n 'assigned_to', 'teams', 'first_name', 'last_name', 'account', 'email', 'phone', 'address', 'description'\n )\n\n def clean_phone(self):\n client_phone = self.cleaned_data.get('phone', None)\n try:\n if int(client_phone) and not client_phone.isalpha():\n ph_length = str(client_phone)\n if len(ph_length) < 10 or len(ph_length) > 13:\n raise forms.ValidationError('Phone number must be minimum 10 Digits and maximum of 13 Digits')\n except (ValueError):\n raise forms.ValidationError('Phone Number should contain only Numbers')\n return client_phone\n\n\nclass ContactCommentForm(forms.ModelForm):\n comment = forms.CharField(max_length=64, required=True)\n\n class Meta:\n model = Comment\n fields = ('comment', 'contact', 'commented_by')\n","repo_name":"samharden/open-cms-app","sub_path":"contacts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"26986108995","text":"#encoding: utf-8\r\n# import requests\r\nfrom requests import get\r\nfrom re import findall\r\nfrom time import sleep\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nfrom tqdm import tqdm\r\nimport os\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\n\r\n\r\n\r\n\r\n# 定义参数\r\nheaders = {'Host':'www.xbiao.com',\r\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36 Edg/89.0.774.50',\r\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\r\n 'Accept-Language':'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',\r\n 'Accept-Encoding':'gzip, deflate',\r\n 'Connection':'keep-alive'}\r\nlist_url = 'http://www.xbiao.com/search/index?tp=p&wd=%(key)s' #腕表之家,以货号查询\r\n#list_url = 'http://s.taobao.com/search?q=%(key)s&ie=utf8&filter_tianmao=tmall&s=%(page)d&filter=%(fprice)s&auction_tag[]=%(new)d'\r\n#cookies ='bdshare_firstime=1614857012880; block=宇舶$http://bbs.xbiao.com/hublot/|欧米茄$http://bbs.xbiao.com/omega/; BIGipServertop-xbiao-web=4274523914.20480.0000; Hm_lvt_a614a6a498e45b564870acd236f789d4=1614912452,1614912520,1615360218,1615538221; PHPSESSID=meua750cli17bnt7gfgc82rls5; Hm_lpvt_a614a6a498e45b564870acd236f789d4=1615540393'\r\n\r\n# 正则模式\r\np_img = '<img src=\"(.*)\" alt' #图片URL\r\n\r\n\r\ndef Get_watchinfo(dirpath,filename):\r\n fpath = os.path.join(dirpath, filename)\r\n watch = pd.read_excel(fpath)\r\n try:\r\n pcode = watch['厂商货号']\r\n savebasic = filename.split('.')[0] + \"_basic.csv\"\r\n saveparam = filename.split('.')[0] + \"_param.csv\"\r\n\r\n col_basic = ['图片链接', '商品名称', '系列', '款式', '材质', '价格', '商品网站链接', '喜欢度']\r\n col_param = ['编号', '品牌', '系列', '机芯类型', '性别',\r\n '机芯型号', '机芯类型', '机芯直径', '机芯厚度', '振频', '宝石数', '电池寿命', '动力储备', '技术认证',\r\n '表径', '表壳厚度', '表盘颜色', '表盘形状', '表带颜色', '表扣类型', '背透', '重量', '防水深度', '表扣间距', '表耳间距',\r\n '表壳材质', '表盘材质', '表镜材质', '表冠材质', '表带材质', '表扣材质', 
'其他功能']\r\n watch_basic = {}\r\n watch_data = {}\r\n # p_basic #['照片路径','商品名称', '基础系列:', '款式:(机芯、size、男女)', '基础材质:', '价格:','商品详情url','喜欢']\r\n # 数据爬取\r\n\r\n for i in tqdm(range(pcode.shape[0])):\r\n pkey = pcode[i].strip(\"\\t\")\r\n print(\"开始查询:\", pkey)\r\n dic_basic = dict.fromkeys(col_basic, '-')\r\n dic_param = dict.fromkeys(col_param, '-')\r\n try:\r\n # 进入查询主界面爬取基础信息\r\n url = list_url % {'key': pkey}\r\n res = get(url, headers=headers)\r\n res.encoding = 'utf-8'\r\n html = res.text\r\n # print(html)\r\n img = findall(p_img, html)\r\n soup = BeautifulSoup(html, 'lxml')\r\n p_items = []\r\n for i in soup.find_all('ul', class_=\"s_attr\"):\r\n for j in i.find_all('li'):\r\n p_items.append(j.get_text())\r\n purl = p_items[-1]\r\n purl_param = purl + \"param.html\"\r\n dic_basic['图片链接'] = img[0]\r\n dic_basic['商品名称'] = p_items[0]\r\n for i1 in range(1, len(p_items) - 1):\r\n pitem = p_items[i1].split(':')\r\n if len(pitem) == 2:\r\n if pitem[0] == '系列':\r\n dic_basic['系列'] = pitem[1]\r\n elif pitem[0] == '款式':\r\n dic_basic['款式'] = pitem[1]\r\n elif pitem[0] == '材质':\r\n dic_basic['材质'] = pitem[1]\r\n elif pitem[0] == '价格':\r\n dic_basic['价格'] = pitem[1]\r\n dic_basic['商品网站链接'] = purl\r\n # 进入商品详情url爬取喜欢数量\r\n res_p = get(purl, headers=headers)\r\n res_p.encoding = 'utf-8'\r\n html_p = res_p.text\r\n soup_p = BeautifulSoup(html_p, 'lxml')\r\n for li in soup_p.find_all('div', class_=\"handle_btn clearfix\"):\r\n like = li.get_text().split('\\n')[1]\r\n dic_basic['喜欢度'] = like\r\n watch_basic[pkey] = dic_basic\r\n # print(type(keydic), type(dic), dic['商品名称'], keydic[pkey])\r\n # 进入商品的详细参数url爬取详细基础信息\r\n res_param = get(purl_param, headers=headers)\r\n res_param.encoding = 'utf-8'\r\n html_param = res_param.text\r\n soup_param = BeautifulSoup(html_param, 'lxml')\r\n p_param = []\r\n for ii in soup_param.find_all('td', class_=\"param_info_txt\"):\r\n for jj in ii.find_all('li'):\r\n p_param.append(jj.get_text())\r\n # print('p_param', len(p_param), p_param)\r\n param = []\r\n fuc_num = 0\r\n for k in range(len(p_param)):\r\n pt = p_param[k].split(':')\r\n if len(pt) == 2:\r\n if pt[0] in dic_param.keys():\r\n dic_param[pt[0]] = pt[1]\r\n else:\r\n pt = p_param[k].split('材质')\r\n if len(pt) == 2:\r\n pt_name = pt[0] + '材质'\r\n if pt_name in dic_param.keys():\r\n dic_param[pt_name] = pt[1]\r\n else:\r\n fuc_num += 1\r\n if fuc_num == 1:\r\n dic_param['其他功能'] = p_param[k]\r\n else:\r\n dic_param['其他功能'] += '&' + p_param[k]\r\n watch_data[pkey] = dic_param\r\n print(pkey, '查询成功!')\r\n sleep(3)\r\n except:\r\n print(pkey, \"查询失败,继续\")\r\n continue\r\n\r\n res_basic = []\r\n res_param = []\r\n for key, value in watch_data.items():\r\n res_param.append(value)\r\n for key, value in watch_basic.items():\r\n res_basic.append(value)\r\n pd_param = pd.DataFrame(res_param, columns=col_param)\r\n pd_basic = pd.DataFrame(res_basic, columns=col_basic)\r\n pd_basic.fillna(' ', inplace=True)\r\n pd_param.fillna(' ', inplace=True)\r\n pd_param.to_csv(os.path.join(dirpath, saveparam), encoding='utf-8_sig', index=False)\r\n pd_basic.to_csv(os.path.join(dirpath, savebasic), encoding='utf-8_sig', index=False)\r\n print(\"==========\", filename, '保存成功!==========')\r\n except:\r\n print('请确保手表的查询货号标题为 ”厂商货号”')\r\n\r\n\r\nif __name__ == '__main__':\r\n print(\"==========使用事项==========\")\r\n print(\"1.选取文件夹,手表文件需为xlsx后缀名\")\r\n print(\"2.手表文件内仅支持1个sheet,第一行为标题,其中查询的手表货号标题需为“厂商货号”\")\r\n #打开选择文件夹对话框\r\n root = tk.Tk()\r\n root.withdraw()\r\n dirpath = filedialog.askdirectory() #获取选择好的文件夹\r\n # dirpath = 
r\"C:\\Users\\HTDF\\Desktop\\PY\\watch\\lyq\"\r\n filelist = []\r\n for filename in os.listdir(dirpath):\r\n if os.path.splitext(filename)[1] == '.xlsx':\r\n filelist.append(filename)\r\n for fn in filelist:\r\n print('==========开始处理:',fn,\"==========\")\r\n Get_watchinfo(dirpath,fn)","repo_name":"yqlin9512/GetData_Watch","sub_path":"GetData_watch.py","file_name":"GetData_watch.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32283862241","text":"import os\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport sklearn.metrics\nimport numpy as np\n\ndef test(dataset_name,data, mask,label, epoch):\n\n model_root = os.path.join('..', 'models')\n cuda = True\n cudnn.benchmark = True\n batch_size = 128\n max_length = 100\n input_dim = 55\n alpha = 0\n label = label.reshape(-1)\n \"\"\"load data\"\"\"\n\n # dataset = torch.utils.data.TensorDataset(torch.Tensor(data),torch.Tensor(mask),torch.LongTensor(label))\n dataset = torch.utils.data.TensorDataset(torch.Tensor(data),\n torch.LongTensor(label))\n\n dataloader = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=8\n )\n\n \"\"\" training \"\"\"\n\n my_net = torch.load(os.path.join(\n model_root, 'DANN_model_epoch_' + str(epoch) + '.pth'\n ))\n my_net = my_net.eval()\n\n if cuda:\n my_net = my_net.cuda()\n\n len_dataloader = len(dataloader)\n data_target_iter = iter(dataloader)\n\n i = 0\n n_total = 0\n n_correct = 0\n pred_label_np = np.zeros(label.shape)\n while i < len_dataloader:\n\n # test model using target data\n data_target = data_target_iter.next()\n t_img,t_label = data_target\n\n batch_size = len(t_label)\n\n input_img = torch.FloatTensor(batch_size, max_length, input_dim)\n # input_mask = torch.FloatTensor(batch_size,max_length, 3)\n class_label = torch.LongTensor(batch_size)\n\n if cuda:\n t_img = t_img.cuda()\n t_label = t_label.cuda()\n # t_mask = t_mask.cuda()\n # input_mask = input_mask.cuda()\n input_img = input_img.cuda()\n class_label = class_label.cuda()\n\n input_img.resize_as_(t_img).copy_(t_img)\n # input_mask.resize_as_(t_mask).copy_(t_mask)\n class_label.resize_as_(t_label).copy_(t_label)\n inputv_img = Variable(input_img)\n # inputv_mask = Variable(input_mask)\n classv_label = Variable(class_label)\n\n class_output,_ = my_net(input_data=inputv_img, alpha = alpha)\n pred = class_output.data.max(1, keepdim=True)[1]\n n_correct += pred.eq(classv_label.data.view_as(pred)).cpu().sum()\n n_total += batch_size\n # print(batch_size)\n # print(pred.shape)\n pred_label_np[i*128:i*128+batch_size] = pred.cpu().numpy().reshape(-1)\n i += 1\n\n accu = n_correct.double()/ n_total\n\n print('epoch: %d, accuracy of the %s dataset: %f' % (epoch, dataset_name, accu))\n\n\n # test_data = torch.FloatTensor(data).view(-1,max_length,input_dim)\n # test_label = label.reshape(-1)\n # if cuda:\n # test_data = test_data.cuda()\n #\n # testv_data = Variable(test_data)\n # class_output, _ = my_net(testv_data, alpha)\n # pred_label = class_output.data.max(1,keepdim=True)[1]\n # pred_label_np = pred_label.cpu().numpy().reshape(-1)\n target_names = ['class 0','class 1','class 2','class 3','class 4','class 5','class 6','class 7','class 8']\n conf_matrix = sklearn.metrics.confusion_matrix(label,pred_label_np)\n print(sklearn.metrics.classification_report(label, pred_label_np, target_names=target_names))\n 
print(conf_matrix)\n","repo_name":"mRSun15/kg-transfer-gann","sub_path":"code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15062670625","text":"n = int(input())\nc = input().split()\nsum = 0\nfor i in range(n):\n c[i]=int(c[i])\n sum+=c[i]\nc.sort(reverse=True)\ns = 0\nfor i in range(n):\n s+=c[i]\n if(s > (sum/2)):\n print(i+1)\n break\n\n","repo_name":"vaishnavi-gupta18/Python_assignment","sub_path":"Project 1/160 A.py","file_name":"160 A.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2700460988","text":"rule, step, i, x, successor, total, current = [0,1,1,1,0,1,1,0], 0, 0, 0, [], [], [0] * 99 + [1]\ntotal = [current]\nwhile step < 100:\n\twhile i < 99:\n\t\tsuccessor.append(rule[4*x + 2*current[i] + current[i+1]])\n\t\ti, x = i + 1, current[i]\n\tsuccessor.append(rule[4*x + 2*current[i] + 0])\n\ttotal.append(successor)\n\tcurrent, successor, i, x, step = successor, [], 0, 0, step + 1\n\ni1, j1, i2, j2 = 50, 50, 100, 100 # indexical info\nfor i in range(i1, i2):\n\tfor j in range(j1, j2):\n\t\tprint(total[i][j],end='') # encoding\n","repo_name":"rjelavic/source","sub_path":"code/rule110.py","file_name":"rule110.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1988331909","text":"import pytz\nfrom datetime import datetime\nfrom task import celery\nfrom util import logger\nfrom db.model import History\nfrom pyfcm import FCMNotification\nfrom server.cache import redis\nfrom server.session import Session\nfrom util.config import config\nfrom db.service import (\n CommentService, HistoryService, PostRestrictionService, PostService, UserService\n)\n\n\n@celery.task(serializer='json')\ndef history_task(code, comment_id, from_id, owner_id, user_ids):\n \"\"\"\n Create comment history\n\n :param str code: post code\n :param int comment_id: comment identity\n :param int from_id: comment owner identity\n :param int owner_id: post owner identity\n :param list[int] user_ids: list of user ids\n \"\"\"\n ps = PostService()\n\n # get post by post code\n post = ps.get_by_code(code)\n\n if post is None:\n logger.error('The post could not found.')\n return\n\n us = UserService()\n prs = PostRestrictionService()\n\n # get restrictions by post id\n restrictions = prs.get_by_post_id(post.pid)\n\n user_ids = []\n for restriction in restrictions:\n # get user by user id\n user = us.get_by_id(restriction.user_id)\n\n # skip if user could not be found\n if user is None:\n logger.error(f'The user #{restriction.user_id} could not found.')\n continue\n\n # populate user id list\n user_ids.append(user.uid)\n\n # increase history unread count\n redis.incr(f'history.{user.username}')\n\n hs = HistoryService()\n\n history = History()\n history.post_id = post.pid\n history.user_id = from_id\n history.comment_id = comment_id\n history.user_ids = user_ids\n history.create_time = datetime.now(tz=pytz.UTC)\n\n # create history\n hs.create(history)\n\n # get comment owner user by user id\n from_user = us.get_by_id(from_id)\n\n if from_user is None:\n logger.error(f'The user #{from_id} is missing.')\n return\n\n cs = CommentService()\n\n # get comment by comment id\n comment = cs.get_by_id(comment_id)\n\n if comment is None:\n logger.error(f'The comment #{comment_id} is 
missing.')\n return\n\n tokens = []\n for user_id in user_ids + [owner_id]:\n # the comment owner should not receive any notification about the comment\n if user_id == from_id:\n continue\n\n # get user by user id\n user = us.get_by_id(user_id)\n\n if user is None:\n logger.error(f'The user #{user_id} is missing.')\n return\n\n # get list of tokens\n members = Session.get_tokens(user_id)\n\n # populate tokens\n if members is not None:\n tokens += members\n\n ps = FCMNotification(api_key=config.firebase.key)\n\n # notification title: the comment author's username\n title = from_user.username\n\n # check iOS character limit, iOS limit is 180\n if len(comment.message) > 180:\n limit = 200 if len(comment.message) > 200 else len(comment.message)\n location = comment.message[180:limit].find(' ')\n\n if location > -1:\n message = comment.message[0:180 + location] + ' ...'\n else:\n message = comment.message[0:180] + ' ...'\n else:\n message = comment.message\n\n # send notification to multiple devices\n ps.notify_multiple_devices(\n registration_ids=tokens,\n message_title=title,\n message_body=message,\n )\n\n logger.info(f'Comment history created for #{comment_id}.')\n","repo_name":"trK54Ylmz/spectrome-backend","sub_path":"task/comment/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"39216046041","text":"import pandas as pd\r\nfrom sitaka import sitaka\r\nfrom sklearn import svm, metrics, preprocessing\r\nfrom sklearn.model_selection import train_test_split, cross_val_score\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom gensim.models import Word2Vec\r\nfrom nltk.tokenize import word_tokenize, sent_tokenize\r\nfrom gensim.models import KeyedVectors\r\nimport numpy as np\r\n#data\r\ntrainFileName = "../../data_train/task1-train.csv"\r\ntestFileName = "../../data_test/task1-test.csv"\r\nWord2VecFileName = "./data/frWac.bin"\r\n\r\n#load data\r\ntrainFile = pd.read_csv(trainFileName, sep = "\\t", header = 7, names =["index","tweet","label"])\r\ntestFile = pd.read_csv(testFileName, sep = "\\t", header = 7, names =["index","tweet"])\r\n\r\n\r\n#load word2vec\r\n\r\n#model = Word2Vec(sent_tokenize(". 
\".join(trainFile[\"tweet\"])), size=100)\r\n#model = KeyedVectors.load_word2vec_format(Word2VecFileName)\r\n#print(model)\r\nsitaka = sitaka()\r\nX = []\r\nY = []\r\nle = preprocessing.LabelEncoder()\r\nle.fit([\"mixed\", \"negative\", \"objective\", \"positive\"])\r\nY = le.transform(trainFile[\"label\"])\r\nfor tweet in trainFile[\"tweet\"]:\r\n T = []\r\n tokens = sitaka.normalize(tweet)\r\n tag = sitaka.tag(tweet)\r\n T = T + sitaka.nb_syntactic_features(tag)\r\n T = T + sitaka.bow_features(tokens)\r\n T = T + sitaka.bonw_features(tokens)\r\n T = T + sitaka.bowo_features(tokens)\r\n T = T + sitaka.bowm_features(tokens)\r\n lemmes = sitaka.lemmes_tokens(tag)\r\n T.append(len(tokens) - len(lemmes))\r\n\r\n T.append(sitaka.polarity(sitaka.pos_polarity(lemmes), sitaka.neg_polarity(lemmes)))\r\n X.append(T)\r\n\r\nX_test =[]\r\nfor tweet in testFile[\"tweet\"]:\r\n T = []\r\n tokens = sitaka.normalize(tweet)\r\n tag = sitaka.tag(tweet)\r\n T = T + sitaka.nb_syntactic_features(tag)\r\n T = T + sitaka.bow_features(tokens)\r\n T = T + sitaka.bonw_features(tokens)\r\n T = T + sitaka.bowo_features(tokens)\r\n T = T + sitaka.bowm_features(tokens)\r\n lemmes = sitaka.lemmes_tokens(tag)\r\n T.append(len(tokens) - len(lemmes))\r\n\r\n T.append(sitaka.polarity(sitaka.pos_polarity(lemmes), sitaka.neg_polarity(lemmes)))\r\n X_test.append(T)\r\n#classifiers = [GaussianNB(), DecisionTreeClassifier(), svm.SVC(kernel='linear', C=1), LogisticRegression()]\r\n\r\n#X_train, X_test, Y_train, Y_test = train_test_split(X, Y,test_size=0.33)\r\n#for cla in classifiers:\r\nclassifier = svm.SVC(kernel='linear', C=1).fit(X,Y)\r\n#scores = cross_val_score(classifier, X, Y, cv=10, scoring = \"f1_macro\")\r\n#print(\"F1_Macro: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\r\nprint(\"Classification des tweets de test...\")\r\nprediction = classifier.predict(X_test)\r\nd = {'index': testFile[\"index\"], 'polarité': le.inverse_transform(prediction)}\r\ndf = pd.DataFrame(data=d)\r\n\r\ndf.to_csv('./task1-run3-equip4.csv', sep='\\t', header=None, index=False)","repo_name":"sam0enna/deft2017","sub_path":"work/equip4/DEFT.py","file_name":"DEFT.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33288458886","text":"from telegram import Bot\r\nfrom telegram import Update\r\nimport random\r\nimport nltk\r\nimport csv\r\n\r\nfrom telegram.ext import Updater\r\nfrom telegram.ext import CommandHandler\r\nfrom telegram.ext import MessageHandler\r\nfrom telegram.ext import Filters\r\n\r\nfrom config import TG_TOKEN, BOT_CONFIG\r\n\r\n# Parser CSV\r\ndef csv_reader(file_obj):\r\n dc = {'intents': {}, 'failure_phrases': ['Извените, я всего лишь робот, поэтому не все понимаю.']}\r\n\r\n # print(dc['intents'])\r\n reader = csv.DictReader(file_obj, delimiter=\";\")\r\n for row in reader:\r\n # print(row['num'])\r\n intent = 'question_' + str(row['num'])\r\n if intent not in dc['intents']:\r\n dc['intents'][intent] = {\r\n 'examples': [],\r\n 'responses': []\r\n }\r\n dc['intents'][intent]['examples'].append(row['question'])\r\n dc['intents'][intent]['responses'].append(row['answer'])\r\n\r\n return dc\r\n\r\n# Очистка\r\ndef clear_phrase(phrase):\r\n phrase = phrase.lower()\r\n\r\n alphabet = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя- '\r\n result = ''.join(symbol for symbol in phrase if symbol in alphabet)\r\n\r\n return result\r\n\r\n# Классофикатор\r\ndef classify_intent(replica):\r\n replica = clear_phrase(replica)\r\n 
distances = []\r\n intents = []\r\n\r\n for intent, intent_data in BOT_CONFIG['intents'].items():\r\n for example in intent_data['examples']:\r\n example = clear_phrase(example)\r\n # Расстояние Левенштейна\r\n distance = nltk.edit_distance(replica, example)\r\n # print(intent, distance / len(example))\r\n if example and distance / len(example) < 0.4:\r\n distances.append(distance / len(example))\r\n intents.append(intent)\r\n\r\n if distances:\r\n return intents[distances.index(min(distances))]\r\n\r\ndef get_answer_by_intent(intent):\r\n if intent in BOT_CONFIG['intents']:\r\n responses = BOT_CONFIG['intents'][intent]['responses']\r\n return random.choice(responses)\r\n\r\ndef get_failure_phrase():\r\n failure_phrases = BOT_CONFIG['failure_phrases']\r\n return random.choice(failure_phrases)\r\n\r\n\r\ndef NLPbot(replica):\r\n # NLU\r\n intent = classify_intent(replica)\r\n\r\n # выбор заготовленной реплики\r\n if intent:\r\n answer = get_answer_by_intent(intent)\r\n if answer:\r\n return answer\r\n\r\n # берем заглушку\r\n return get_failure_phrase()\r\n\r\n\r\n\r\ndef do_start(bot: Bot, update: Update):\r\n bot.send_message(\r\n chat_id=update.effective_message.chat_id,\r\n text=\"Привет! Меня зовут Бот, я Ваш виртуальный консультант по наставничеству.\"\r\n )\r\n bot.send_message(\r\n chat_id=update.effective_message.chat_id,\r\n text=\"Я здесь, чтобы помочь тебе найти ответы на тему наставничества.\"\r\n )\r\n\r\ndef do_echo(bot: Bot, update: Update):\r\n text = NLPbot(update.effective_message.text)\r\n bot.send_message(\r\n chat_id=update.effective_message.chat_id,\r\n text=text,\r\n )\r\n\r\ndef main():\r\n bot = Bot(\r\n token=TG_TOKEN,\r\n )\r\n updater = Updater(\r\n bot=bot,\r\n )\r\n\r\n start_handler = CommandHandler(\"start\", do_start)\r\n message_handler = MessageHandler(Filters.text, do_echo)\r\n\r\n\r\n updater.dispatcher.add_handler(start_handler)\r\n updater.dispatcher.add_handler(message_handler)\r\n\r\n updater.start_polling()\r\n updater.idle()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","repo_name":"RabbitBanan/tsbrbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"34473366308","text":"import glm\n\nfrom lib.opengl import *\nfrom lib.opengl.core.base import *\nfrom .ChunkMeshAllNode_shader import vert_src, frag_src\n\n\nclass ChunkMeshAllNode(RenderNode):\n\n def __init__(self, world, renderer, name):\n super().__init__(name)\n self.world = world\n self.renderer = renderer\n self.mesh = None\n self.mesh_drawable = None\n self.tileset_tex = None\n self.chunk_tex = None\n self.vdf_tex = None\n self.vdf_scale = 1\n\n def has_depth_output(self):\n return True\n\n def num_multi_sample(self):\n return 16\n\n @property\n def chunk(self):\n return self.world.chunk\n\n def get_code(self):\n return frag_src\n\n def create(self, render_settings):\n # level mesh\n mesh_name = \"mesh-%s-drawable\" % self.chunk.id\n print(\"creating mesh\")\n self.mesh = self.chunk.create_mesh()\n print(\"done..\")\n if OpenGlAssets.has(mesh_name):\n self.mesh_drawable = OpenGlAssets.get(mesh_name)\n else:\n print(\"creating mesh VAO\")\n self.mesh_drawable = self.mesh.create_drawable(\"level-mesh\")\n self.mesh_drawable.shader.set_vertex_source(vert_src)\n self.mesh_drawable.shader.set_fragment_source(frag_src)\n OpenGlAssets.register(mesh_name, self.mesh_drawable)\n print(\"done..\", self.mesh_drawable)\n\n if self.tileset_tex is 
None:\n self.tileset_tex = self.world.tileset.create_texture2d()\n\n if self.chunk_tex is None:\n self.chunk_tex = self.world.chunk.create_texture3d()\n\n # voxel distance field\n if self.vdf_tex is None:\n self.vdf_tex = self.chunk.create_voxel_distance_texture3d(scale=self.vdf_scale)\n\n def render(self, rs, pass_num):\n if 0:\n glDisable(GL_CULL_FACE)\n else:\n glEnable(GL_CULL_FACE)\n glCullFace(GL_BACK)\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_BLEND)\n glDepthMask(True)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n proj = rs.projection.matrix\n\n lightpos = glm.vec3(self.world.click_voxel) + glm.vec3(.5, .5, 1.5)\n shader = self.mesh_drawable.shader\n shader.set_uniform(\"u_projection\", proj)\n shader.set_uniform(\"u_time\", rs.time)\n shader.set_uniform(\"u_lightpos\", glm.vec4(lightpos, 1))\n shader.set_uniform(\"u_color_tex\", 0)\n shader.set_uniform(\"u_chunk_tex\", 1)\n shader.set_uniform(\"u_vdf_tex\", 2)\n shader.set_uniform(\"u_chunksize\", self.chunk.size())\n shader.set_uniform(\"u_vdf_size\", self.vdf_tex.size())\n shader.set_uniform(\"u_vdf_scale\", self.vdf_scale)\n shader.set_uniform(\"u_player_pos\", self.world.agents[\"player\"].sposition)\n shader.set_uniform(\"u_hit_voxel\", self.world.click_voxel)\n shader.set_uniform(\"u_debug_view\", self.world.debug_view)\n\n Texture2D.set_active_texture(0)\n self.tileset_tex.bind()\n Texture2D.set_active_texture(1)\n self.chunk_tex.bind()\n Texture2D.set_active_texture(2)\n self.vdf_tex.bind()\n Texture2D.set_active_texture(0)\n\n # main scene\n self.mesh_drawable.draw()\n\n # waypoints debugger\n if self.world.edit_mode:\n self.world.agents.path_debug_renderer.render(rs.projection)\n\n self.world.agents.render(rs.projection)\n\n glDisable(GL_DEPTH_TEST)\n glDisable(GL_BLEND)\n","repo_name":"defgsus/thegame","sub_path":"lib/world/render/full/ChunkMeshAllNode.py","file_name":"ChunkMeshAllNode.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9099166585","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport utils\nimport config as c\nfrom network.vae import VAE\nfrom environment.window import Window\n\n\ndef plot_gradients():\n # Create plot\n fig, axs = plt.subplots(2, 2, num='Gradients', figsize=(30, 30))\n\n # Load network\n network = VAE()\n network.load()\n\n log = {}\n\n # Plot gradients\n for i, obj in enumerate(('arm', 'target')):\n for j, coord in enumerate(('cart', 'pol')):\n env = Gradient(network, axs[i, j], obj, coord)\n axs[i, j].set_aspect('equal')\n env.run()\n log[obj + '_' + coord] = env.grads\n\n # np.savez_compressed('log_grads', arm_cart=log['arm_cart'],\n # arm_pol=log['arm_pol'],\n # target_cart=log['target_cart'],\n # target_pol=log['target_pol'])\n\n fig.savefig('plots/gradients', bbox_inches='tight')\n\n\nclass Gradient(Window):\n def __init__(self, network, axs, obj, coord):\n super().__init__()\n self.network = network\n self.axs = axs\n self.obj = obj\n self.coord = coord\n\n self.grads = []\n\n self.current = np.array([0, 57, 90])\n self.target = np.array([0, 60, 90])\n\n if self.coord == 'cart':\n self.axs.set_xlabel('x (px)')\n self.axs.set_ylabel('y (px)')\n else:\n self.axs.set_xlabel(r'$\\theta_1 (°)$')\n self.axs.set_ylabel(r'$\\theta_2 (°)$')\n\n def update(self, dt):\n # Draw arm and target\n arm_pos = self.arm.kinematics(self.current)[:, :2]\n target_pos = self.arm.kinematics(self.target)[-1, :2]\n\n if self.coord == 'cart':\n for i in range(c.n_joints):\n 
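                # (added note) consecutive joint positions form one arm segment;
                # the stroke width scales with the segment's physical thickness
                # stored in self.arm.size, so thicker links render wider.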
self.axs.plot(*arm_pos[i: i + 2].T,\n linewidth=self.arm.size[i, 1] * 3, color='b')\n\n self.axs.scatter(*target_pos, color='r', s=2000)\n else:\n color = 'b' if self.obj == 'arm' else 'r'\n self.axs.scatter(*self.target[1:], color=color, s=2000)\n\n # Draw quiver\n x, u = self.joint(self.current, self.target)\n q = self.axs.quiver(*x.T, *u.T, zorder=-1, angles='xy')\n\n self.grads = [x, u]\n self.stop()\n\n def joint(self, arm, target):\n # Get attractor\n self.arm.set_rotation(arm)\n self.on_draw()\n attractor = self.get_visual_obs()\n\n # Compute gradients\n x, u = [], []\n for j in range(*self.arm.limits[:, 1].astype(int), 10):\n for k in range(*self.arm.limits[:, 2].astype(int), 10):\n # Compute visual prediction\n joint = np.array([0, j, k])\n joint_obs = np.concatenate((joint, target)) if \\\n self.obj == 'arm' else np.concatenate((arm, joint))\n norm = utils.normalize(joint_obs, np.tile(self.arm.limits, 2))\n input_, output = self.network.predict_visual(norm)\n\n # Get error\n predict = output.detach().cpu().numpy()\n error = attractor.reshape(1, 3, c.height, c.width) - predict\n\n # Get gradient\n grad = self.network.get_grad(input_, output, error)\n\n if self.obj == 'arm':\n grad = grad[:c.n_joints]\n else:\n grad = grad[c.n_joints:]\n\n joint_new = np.clip(joint + grad / 20, *self.arm.limits)\n pos = self.arm.kinematics(joint)[-1, :2]\n pos_new = self.arm.kinematics(joint_new)[-1, :2]\n\n if self.coord == 'cart':\n x.append(pos)\n u.append(pos_new - pos)\n else:\n x.append(joint[1:])\n u.append(joint_new[1:] - joint[1:])\n\n return np.array(x), np.array(u)\n","repo_name":"priorelli/PACE","sub_path":"plots/gradients.py","file_name":"gradients.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42866682785","text":"import sys\r\ninput = sys.stdin.readline\r\nsys.setrecursionlimit(10**6)\r\n\r\ndef find_(n, x, y):\r\n if n == 1:\r\n return 0\r\n \r\n g = n//2\r\n nx = x + g\r\n ny = y + g\r\n if r < nx and c < ny:\r\n return find_(g, x, y)\r\n elif r < nx and c >= ny:\r\n return find_(g, x, ny) + g**2\r\n elif r >= nx and c < ny:\r\n return find_(g, nx, y) + (g**2)*2\r\n elif r >= nx and c >= ny:\r\n return find_(g, nx, ny) + (g**2)*3\r\n\r\nN, r, c = map(int, input().split())\r\nprint(find_(2**N, 0, 0))","repo_name":"iblug/Baekjoon","sub_path":"백준/Silver/1074. 
Z/Z.py","file_name":"Z.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14640316421","text":"N = int(input())\ndata = list(map(int, input().split()))\nadd, sub, mul, div = map(int, input().split())\nmax_v = -int(1e9)\nmin_v = int(1e9)\n\n\ndef dfs(depth, num, add, sub, mul, div):\n global max_v\n global min_v\n\n if depth == N:\n max_v = max(max_v, num)\n min_v = min(min_v, num)\n else:\n if add:\n dfs(depth + 1, num + data[depth], add - 1, sub, mul, div)\n if sub:\n dfs(depth + 1, num - data[depth], add, sub - 1, mul, div)\n if mul:\n dfs(depth + 1, num * data[depth], add, sub, mul - 1, div)\n if div:\n dfs(depth + 1, int(num / data[depth]), add, sub, mul, div - 1)\n\n\ndfs(1, data[0], add, sub, mul, div)\nprint(max_v)\nprint(min_v)\n","repo_name":"hugehoo/problem-solving","sub_path":"2022/2022-12/14888.py","file_name":"14888.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4199030310","text":"#!/usr/bin/python3\nimport os\nimport subprocess\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom flask import Flask, flash, request, redirect, url_for, send_from_directory, render_template, make_response\nfrom datetime import datetime\nfrom functools import wraps, update_wrapper\nfrom werkzeug.utils import secure_filename\n\nBASEDIR = '/vagrant'\nUPLOAD_FOLDER = BASEDIR + '/uploads'\nPROCESSED_FOLDER = BASEDIR + '/processed'\nALLOWED_EXTENSIONS = set(['txt','log'])\n\napp = Flask(__name__, static_url_path='')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['PROCESSED_FOLDER'] = PROCESSED_FOLDER\napp.secret_key = 'some_secret' \n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\ndef process_file(parser, filein):\n\tcmd = [\"perl\", BASEDIR + \"/parser/parser.pl\", \"-p\", parser, \"-l\", filein, \"-o\", app.config['PROCESSED_FOLDER']]\n\tprint (\"cmd\", cmd)\n\tp = subprocess.Popen(cmd, stdout = subprocess.PIPE,\n\t\t\t\t\t\t\t stderr = subprocess.PIPE,\n\t\t\t\t\t\t\t stdin = subprocess.PIPE)\n\tout,err = p.communicate()\n\ndef nocache(view):\n @wraps(view)\n def no_cache(*args, **kwargs):\n response = make_response(view(*args, **kwargs))\n response.headers['Last-Modified'] = datetime.now()\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response\n \n return update_wrapper(no_cache, view)\n \n@app.route('/')\ndef root():\n\treturn render_template(\"index.html\")\n\t\n@app.route('/upload_parser', methods=['GET', 'POST'])\ndef upload_parser():\n if request.method == 'POST':\n # check if the post request has the parser field\n parser = request.form['parser']\n print(\"parser: \" + parser)\n \n # check if the post request has the file part\n if 'filein' not in request.files:\n # if 'filein' not in request.files['filein']:\n flash('No filein part')\n return redirect(request.url)\n \n file = request.files['filein']\n print(\"file: \" + file.filename)\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n print(\"is it an allowed_file?\");\n\t\t\t\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n filein = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n # Save the input file locally\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n \n pre, ext = os.path.splitext(filename)\n print (\"pre: \"+pre)\n json_file = pre+'.json'\n loghtmlfile = filename +'.html'\n \n print (\"json_file: \" + json_file)\n json_file_path = os.path.join(app.config['PROCESSED_FOLDER'], json_file)\n \n # Now process the file - output will be json file in processed folder\n process_file(parser, filein)\n\n # Render the processed file\n return render_template(\"plot_msc.html\", parser=parser, msc_json=json_file, loghtml=loghtmlfile)\n else:\n print(\"Not a valid file type\")\n\t\t\t\n return redirect('/')\n\n@app.route('/uploads/<filename>')\n@nocache\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\n\n@app.route('/processed/<filename>')\n@nocache\ndef processed_file(filename):\n return send_from_directory(app.config['PROCESSED_FOLDER'],\n filename)\n\n# Version to take csv file as a param\t\t\t\t\n@app.route('/plot_msc')\ndef plot_msc(parser=None, msc_json=None):\n return render_template('plot_msc.html', \n parser=parser, \n msc_json=msc_json)\n\n\n@app.route('/test')\ndef plot_test(parser=None, msc_json=None):\n return render_template('plot_msc.html', \n parser=\"simple\", \n msc_json=\"simple_msc.json\",\n loghtml=\"simple_msc.txt.html\")\n\t\n@app.route('/js/<path:path>')\ndef send_js(path):\n\treturn send_from_directory('js', path)\n\t\n@app.route('/lib/<path:path>')\ndef send_lib(path):\n\treturn send_from_directory('lib', path)\n\n@app.route('/css/<path:path>')\ndef send_css(path):\n\treturn send_from_directory('css', path)\n\t\n@app.route('/config')\ndef send_():\n\treturn send_from_directory('parser', \"parser_config.xml\")\n\nif __name__ 
== '__main__':\n app.run(debug=True, host='0.0.0.0')","repo_name":"TheDevelolper/parserService","sub_path":"parserService.py","file_name":"parserService.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7671804260","text":"import os\nimport shutil\nimport pytest\n\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.event import listens_for\nfrom passlib.context import CryptContext\n\nfrom auth import create_token\nfrom main import app\nfrom dependencies import get_db\nfrom database import Base\nfrom models import User, File\nfrom routers import hash_file\n\n\nTEST_USER_DATA = {\n \"username\": \"admin\",\n \"email\": \"admin@admin.com\",\n \"first_name\": None,\n \"last_name\": None,\n \"password\": \"admin\",\n}\nTEST_FILE_PATH = 'files_to_upload'\nUPLOAD_DIR = 'media'\n\npwd_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\")\n\nSQLALCHEMY_DATABASE_URL = f\"postgresql://postgres:postgres@db/db\"\n\nengine = create_engine(SQLALCHEMY_DATABASE_URL)\nTestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\nBase.metadata.drop_all(bind=engine)\nBase.metadata.create_all(bind=engine)\n\n\n@pytest.fixture()\ndef session():\n connection = engine.connect()\n transaction = connection.begin()\n session = TestingSessionLocal(bind=connection)\n\n nested = connection.begin_nested()\n\n @listens_for(session, \"after_transaction_end\")\n def end_savepoint(session, transaction):\n nonlocal nested\n if not nested.is_active:\n nested = connection.begin_nested()\n\n yield session\n\n session.close()\n transaction.rollback()\n connection.close()\n\n\n@pytest.fixture()\ndef client(session):\n def override_get_db():\n yield session\n app.dependency_overrides[get_db] = override_get_db\n yield TestClient(app)\n del app.dependency_overrides[get_db]\n\n\n@pytest.fixture()\ndef add_user(session):\n user_data = TEST_USER_DATA.copy()\n hashed_password = pwd_context.hash(user_data.pop(\"password\"))\n user_data['hashed_password'] = hashed_password\n db_user = User(**user_data)\n session.add(db_user)\n session.commit()\n session.refresh(db_user)\n\n\n@pytest.fixture()\ndef user_token() -> str:\n return create_token(\n TEST_USER_DATA[\"username\"]\n )['access_token']\n\n\n@pytest.fixture()\ndef add_simple_file(add_user, session):\n with open(f'{TEST_FILE_PATH}/simple_file.txt', 'rb') as file:\n current_user = session.query(User).filter(User.username == TEST_USER_DATA['username']).first()\n db_file = File(\n filename=file.name.split('/')[1],\n file_dir=f'{UPLOAD_DIR}/{current_user.id}',\n description=None,\n owner_id=current_user.id,\n content_type='text',\n file_size_bytes=os.path.getsize(file.name),\n filehash=hash_file(file.read())\n )\n\n if not os.path.exists(UPLOAD_DIR):\n os.mkdir(UPLOAD_DIR)\n if not os.path.exists(db_file.file_dir):\n os.mkdir(db_file.file_dir)\n file.seek(0) # rewind after hashing so the copy below gets the full file content\n with open(f'{db_file.file_dir}/{db_file.filename}', 'wb') as new_file:\n shutil.copyfileobj(file, new_file)\n\n session.add(db_file)\n session.commit()\n session.refresh(db_file)\n","repo_name":"tarashurik/filesstorage","sub_path":"app/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"12923146846","text":"\"\"\"train.py \n\nMain training script.\n\nUsage:\n train.py [--gpu=<id>] 
[--view=<dset>]\n train.py (-h | --help)\n train.py --version\n\nOptions:\n -h --help Show this string.\n --version Show version.\n --gpu=<id> Comma separated GPU list. \n --view=<dset> View dataset- use either 'train' or 'valid'.\n\"\"\"\n\nfrom docopt import docopt\nimport argparse\nimport json\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorpack import Inferencer, logger\nfrom tensorpack.callbacks import (\n DataParallelInferenceRunner,\n ModelSaver,\n MinSaver,\n MaxSaver,\n ScheduledHyperParamSetter,\n)\nfrom tensorpack.tfutils import SaverRestore, get_model_loader\nfrom tensorpack.train import (\n SyncMultiGPUTrainerParameterServer,\n TrainConfig,\n launch_train_with_config,\n)\n\nimport loader.loader as loader\nfrom config import Config\nfrom misc.utils import get_files\n\nfrom sklearn.metrics import roc_auc_score\n\n\nclass StatCollector(Inferencer, Config):\n \"\"\"\n Accumulate output of inference during training.\n After the inference finishes, calculate the statistics\n \"\"\"\n\n def __init__(self, prefix=\"valid\"):\n super(StatCollector, self).__init__()\n self.prefix = prefix\n\n def _get_fetches(self):\n return self.train_inf_output_tensor_names\n\n def _before_inference(self):\n self.true_list = []\n self.pred_list = []\n\n def _on_fetches(self, outputs):\n pred, true = outputs\n self.true_list.extend(true)\n self.pred_list.extend(pred)\n\n def _after_inference(self):\n # ! factor this out\n def _dice(true, pred, label):\n true = np.array(true[..., label], np.int32)\n pred = np.array(pred[..., label], np.int32)\n inter = (pred * true).sum()\n total = (pred + true).sum()\n return 2 * inter / (total + 1.0e-8)\n\n stat_dict = {}\n pred = np.array(self.pred_list)\n true = np.array(self.true_list)\n\n if self.model_mode == \"seg_gland\":\n # Get the segmentation stats\n\n pred = pred[..., :2]\n true = true[..., :2]\n\n # Binarize the prediction\n pred[pred > 0.5] = 1.0\n\n stat_dict[self.prefix + \"_dice_obj\"] = _dice(true, pred, 0)\n stat_dict[self.prefix + \"_dice_cnt\"] = _dice(true, pred, 1)\n\n elif self.model_mode == \"seg_nuc\":\n # Get the segmentation stats\n\n pred = pred[..., :3]\n true = true[..., :3]\n\n # Binarize the prediction\n pred[pred > 0.5] = 1.0\n\n stat_dict[self.prefix + \"_dice_np\"] = _dice(true, pred, 0)\n stat_dict[self.prefix + \"_dice_mk_blb\"] = _dice(true, pred, 1)\n stat_dict[self.prefix + \"_dice_mk_cnt\"] = _dice(true, pred, 2)\n\n else:\n # Get the classification stats\n\n # Convert vector to scalar prediction\n prob = np.squeeze(pred[..., 1])\n pred = np.argmax(pred, -1)\n pred = np.squeeze(pred)\n true = np.squeeze(true)\n\n accuracy = (pred == true).sum() / np.size(true)\n error = (pred != true).sum() / np.size(true)\n\n stat_dict[self.prefix + \"_acc\"] = accuracy * 100\n stat_dict[self.prefix + \"_error\"] = error * 100\n\n if self.model_mode == \"class_pcam\":\n auc = roc_auc_score(true, prob)\n stat_dict[self.prefix + \"_auc\"] = auc\n\n return stat_dict\n\n\n###########################################\n\n\nclass Trainer(Config):\n def get_datagen(self, batch_size, mode=\"train\", view=False):\n if mode == \"train\":\n augmentors = self.get_train_augmentors(\n self.train_input_shape, self.train_output_shape, view\n )\n data_files = get_files(self.train_dir, self.data_ext)\n # Different data generators for segmentation and classification\n if self.model_mode == \"seg_gland\" or self.model_mode == \"seg_nuc\":\n data_generator = loader.train_generator_seg\n else:\n data_generator = loader.train_generator_class\n 
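            # (added note) segmentation modes stream image/label-map pairs, while
            # the classification generators only pair patches with scalar labels,
            # hence the separate loader entry points above.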
nr_procs = self.nr_procs_train\n else:\n augmentors = self.get_valid_augmentors(\n self.train_input_shape, self.train_output_shape, view\n )\n # Different data generators for segmentation and classification\n data_files = get_files(self.valid_dir, self.data_ext)\n if self.model_mode == \"seg_gland\" or self.model_mode == \"seg_nuc\":\n data_generator = loader.valid_generator_seg\n else:\n data_generator = loader.valid_generator_class\n nr_procs = self.nr_procs_valid\n\n # set nr_proc=1 for viewing to ensure clean ctrl-z\n nr_procs = 1 if view else nr_procs\n dataset = loader.DatasetSerial(data_files)\n if self.model_mode == \"seg_gland\" or self.model_mode == \"seg_nuc\":\n datagen = data_generator(\n dataset,\n shape_aug=augmentors[0],\n input_aug=augmentors[1],\n label_aug=augmentors[2],\n batch_size=batch_size,\n nr_procs=nr_procs,\n )\n else:\n datagen = data_generator(\n dataset,\n shape_aug=augmentors[0],\n input_aug=augmentors[1],\n batch_size=batch_size,\n nr_procs=nr_procs,\n )\n\n return datagen\n\n def view_dataset(self, mode=\"train\"):\n assert mode == \"train\" or mode == \"valid\", \"Invalid view mode\"\n if self.model_mode == \"seg_gland\" or self.model_mode == \"seg_nuc\":\n datagen = self.get_datagen(4, mode=mode, view=True)\n loader.visualize(datagen, 4)\n else:\n # visualise more for classification- don't need to show label\n datagen = self.get_datagen(8, mode=mode, view=True)\n loader.visualize(datagen, 8)\n return\n\n def run_once(self, opt, sess_init=None, save_dir=None):\n\n train_datagen = self.get_datagen(opt[\"train_batch_size\"], mode=\"train\")\n valid_datagen = self.get_datagen(opt[\"infer_batch_size\"], mode=\"valid\")\n\n ###### must be called before ModelSaver\n if save_dir is None:\n logger.set_logger_dir(self.save_dir)\n else:\n logger.set_logger_dir(save_dir)\n\n ######\n model_flags = opt[\"model_flags\"]\n model = self.get_model()(**model_flags)\n ######\n callbacks = [\n ModelSaver(max_to_keep=1, keep_checkpoint_every_n_hours=None),\n ]\n\n for param_name, param_info in opt[\"manual_parameters\"].items():\n model.add_manual_variable(param_name, param_info[0])\n callbacks.append(ScheduledHyperParamSetter(param_name, param_info[1]))\n # multi-GPU inference (with mandatory queue prefetch)\n infs = [StatCollector()]\n callbacks.append(\n DataParallelInferenceRunner(valid_datagen, infs, list(range(nr_gpus)))\n )\n if self.model_mode == \"seg_gland\":\n callbacks.append(MaxSaver(\"valid_dice_obj\"))\n elif self.model_mode == \"seg_nuc\":\n callbacks.append(MaxSaver(\"valid_dice_np\"))\n else:\n callbacks.append(MaxSaver(\"valid_auc\"))\n\n steps_per_epoch = train_datagen.size() // nr_gpus\n\n config = TrainConfig(\n model=model,\n callbacks=callbacks,\n dataflow=train_datagen,\n steps_per_epoch=steps_per_epoch,\n max_epoch=opt[\"nr_epochs\"],\n )\n config.session_init = sess_init\n\n launch_train_with_config(config, SyncMultiGPUTrainerParameterServer(nr_gpus))\n tf.reset_default_graph() # remove the entire graph in case of multiple runs\n return\n\n def run(self):\n def get_last_chkpt_path(prev_phase_dir):\n stat_file_path = prev_phase_dir + \"/stats.json\"\n with open(stat_file_path) as stat_file:\n info = json.load(stat_file)\n chkpt_list = [epoch_stat[\"global_step\"] for epoch_stat in info]\n last_chkpts_path = \"%smodel-%d.index\" % (prev_phase_dir, max(chkpt_list))\n return last_chkpts_path\n\n phase_opts = self.training_phase\n\n if len(phase_opts) > 1:\n for idx, opt in enumerate(phase_opts):\n\n log_dir = \"%s/%02d\" % (self.save_dir, idx)\n if 
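`run()`'s `get_last_chkpt_path` resumes a phase by scanning the `stats.json` that tensorpack writes (one dict per epoch carrying a `"global_step"`) and pointing at the newest `model-<step>.index`. The same lookup against a hand-written stats list (the step numbers are invented):

```python
# Checkpoint lookup as in get_last_chkpt_path above; the stats are made up.
import io
import json

def last_chkpt_path(phase_dir, stats):
    steps = [epoch_stat["global_step"] for epoch_stat in stats]
    return "%s/model-%d.index" % (phase_dir, max(steps))

stats = json.load(io.StringIO('[{"global_step": 500}, {"global_step": 1500}]'))
print(last_chkpt_path("log/00", stats))  # log/00/model-1500.index
```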
opt[\"pretrained_path\"] == -1:\n pretrained_path = get_last_chkpt_path(prev_log_dir)\n init_weights = SaverRestore(\n pretrained_path, ignore=[\"learning_rate\"]\n )\n elif opt[\"pretrained_path\"] is not None:\n init_weights = get_model_loader(pretrained_path)\n self.run_once(opt, sess_init=init_weights, save_dir=log_dir + \"/\")\n prev_log_dir = log_dir\n else:\n\n opt = phase_opts[0]\n if \"pretrained_path\" in opt:\n if opt[\"pretrained_path\"] == None:\n init_weights = None\n elif opt[\"pretrained_path\"] == -1:\n log_dir_prev = \"%s\" % self.save_dir\n pretrained_path = get_last_chkpt_path(log_dir_prev)\n init_weights = SaverRestore(\n pretrained_path, ignore=[\"learning_rate\"]\n )\n else:\n init_weights = get_model_loader(opt[\"pretrained_path\"])\n self.run_once(opt, sess_init=init_weights, save_dir=self.save_dir)\n\n return\n\n\n###########################################################################\n\n\nif __name__ == \"__main__\":\n\n args = docopt(__doc__)\n print(args)\n\n trainer = Trainer()\n\n if args[\"--view\"] and args[\"--gpu\"]:\n raise Exception(\"Supply only one of --view and --gpu.\")\n\n if args[\"--view\"]:\n if args[\"--view\"] != \"train\" and args[\"--view\"] != \"valid\":\n raise Exception('Use \"train\" or \"valid\" for --view.')\n trainer.view_dataset(args[\"--view\"])\n else:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args[\"--gpu\"]\n nr_gpus = len(args[\"--gpu\"].split(\",\"))\n trainer.run()\n","repo_name":"simongraham/dsf-cnn","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10266,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"81"} +{"seq_id":"70697451146","text":"\n\"\"\"\nThis object holds all team information. Initially, this includes their metadata and eventually gets\nupdated to hold their total score.\n\n- team_name: name of the team\n- real_name: full name of person who submitted form\n- split_preference: whether the person wants winner-take-all or split the top 3 winners\n- pay_type: free or paying team\n- total_score: initially set to 0, will be updated throughout\n\"\"\"\n\n\nclass Team:\n def __init__(self, team_name: str, real_name: str, split_preference: str, pay_type: str):\n self.team_name = team_name\n self.real_name = real_name\n self.split_preference = split_preference\n self.pay_type = pay_type\n self.total_score = 0\n\n def add_to_total_score(self, addition: int):\n \"\"\"\n mechanism to update the score from 0 to the total for a given question\n\n :param addition: point value accrued for a given question (0 if incorrect)\n :return: updated score\n \"\"\"\n self.total_score += addition\n\n def __str__(self):\n base_string = f\"Team: {self.team_name}; team name: {self.real_name}; \" \\\n f\"split preference: {self.split_preference}; pay type: {self.pay_type}\"\n if self.total_score == 0:\n return base_string\n else:\n return f\"{base_string}; total score: {self.total_score}\"\n","repo_name":"Anne-alysis/GameOfThronesLeaguePythonOO","sub_path":"src/classes/Team.py","file_name":"Team.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2251494515","text":"import json, logging, os, data\nimport torch\nimport torch.nn as nn\nimport boto3\n\nfrom model import EncoderRNN, Attn, LuongAttnDecoderRNN\nfrom evaluate import GreedySearchDecoder, predict_answer\nfrom sagemaker import get_execution_role\n\nrole = get_execution_role()\nbucketName = 
'faq-chatbot'\n\nJSON_CONTENT_TYPE = 'application/json'\nlogger = logging.getLogger(__name__)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nlogger.info('Current device: {}'.format(device))\n\ndef model_fn(model_dir):\n    logger.info('Loading the model.')\n\n    with open(os.path.join(model_dir, 'model_info.pth'), 'rb') as f:\n        # If loading a model trained on GPU to CPU\n        if torch.cuda.device_count() < 1:\n            checkpoint = torch.load(f, map_location=torch.device('cpu'))\n        else:\n            checkpoint = torch.load(f)\n\n    # hyperparameters saved together with the model weights\n    hidden_size = checkpoint['hidden_size']\n    encoder_n_layers = checkpoint['encoder_n_layers']\n    decoder_n_layers = checkpoint['decoder_n_layers']\n    dropout = checkpoint['dropout']\n    attn_model = checkpoint['attn_model']\n    voc = checkpoint['voc']\n\n    # Initialize word embeddings\n    embedding = nn.Embedding(voc.num_words, hidden_size)\n    embedding.load_state_dict(checkpoint['embedding'])\n\n    # Initialize encoder & decoder models\n    encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)\n    decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)\n    encoder.load_state_dict(checkpoint['en'])\n    decoder.load_state_dict(checkpoint['de'])\n\n    # Set dropout layers to eval mode\n    encoder.eval()\n    decoder.eval()\n\n    searcher = GreedySearchDecoder(encoder, decoder, device)\n\n    return {'searcher': searcher, 'voc': voc}\n\n\ndef input_fn(serialized_input_data, content_type=JSON_CONTENT_TYPE):\n    logger.info('Deserializing the input data.')\n\n    if content_type == JSON_CONTENT_TYPE:\n        input_data = json.loads(serialized_input_data)\n        if len(input_data['question']) < 3:\n            raise Exception('\\'question\\' must be longer than 3 characters')\n        return input_data\n    raise Exception('Requested unsupported ContentType in content_type: ' + content_type)\n\n\ndef output_fn(prediction_output, accept=JSON_CONTENT_TYPE):\n    logger.info('Serializing the generated output.')\n    if accept == JSON_CONTENT_TYPE:\n        return json.dumps(prediction_output), accept\n    raise Exception('Requested unsupported ContentType in Accept: ' + accept)\n\ndef predict_fn(input_data, model):\n    logger.info('Generating answer based on input question.')\n    with torch.no_grad():  # no gradient tracking during inference\n        return ''.join(predict_answer(model['searcher'], model['voc'], input_data, device))\n","repo_name":"cheesama/machine-learning","sub_path":"NLP/faq-chatbot/entry_point.py","file_name":"entry_point.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"215307001","text":"from django.urls import path\nfrom . 

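`model_fn`/`input_fn`/`predict_fn`/`output_fn` above follow the SageMaker PyTorch serving convention, which makes them easy to smoke-test locally by chaining them with a stub model, no endpoint required. A sketch (the stub dict stands in for the real searcher/voc pair):

```python
# Local smoke test of the SageMaker handler chain used above
# (model_fn -> input_fn -> predict_fn -> output_fn); stub model only.
import json

def input_fn(payload, content_type="application/json"):
    assert content_type == "application/json"
    return json.loads(payload)

def predict_fn(data, model):
    return model["answer"]  # stub for the seq2seq searcher + voc

def output_fn(prediction, accept="application/json"):
    return json.dumps(prediction), accept

stub_model = {"answer": "hello"}
body, accept = output_fn(predict_fn(input_fn('{"question": "hi there"}'), stub_model))
print(body, accept)  # "hello" application/json
```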
import views\n\nurlpatterns = [\n path('send_email', views.SendEmail.as_view(), name='emailSend'),\n path('send_bulk', views.SendBulkEmail.as_view(), name='emailSendBulk'),\n path('draft/<int:pk>', views.ViewDraft.as_view(), name='emailSendDraft'),\n path('get_cc', views.get_users_for_cc, name='getEmailCC'),\n path('get_to', views.get_clients_records_for_to, name='getEmailTO'),\n\n path('migrate_notes', views.migrate_notes_to_email, name='migrateNotesToEmail'),\n\n # path('compose_email', views.EmailHomepage.as_view(), name='emailCompose'),\n # path('compose_email/<int:qb_id>', views.SendEmailFromQBID.as_view(), name='emailComposeFromQBID'),\n path('get_template/<int:pk>', views.get_existing_template, name='getTemplate'),\n path('all_templates', views.AllTemplates.as_view(), name='emailAllTemplates'),\n path('create_new_template', views.CreateNewTemplate.as_view(), name='createNewTemplate'),\n path('edit_template/<int:pk>', views.EditTemplate.as_view(), name='emailEditTemplate'),\n path('delete_template/<int:pk>', views.delete_template, name='deleteEmailTemplate'),\n path('sent_mail', views.AllSentMail.as_view(), name='emailSentMail'),\n path('view_sent_email/<int:pk>', views.ViewSentEmail.as_view(), name='emailViewSentEmail'),\n path('drafts', views.AllDrafts.as_view(), name='emailDrafts'),\n path('view_draft/<int:pk>', views.ViewDraft.as_view(), name='viewEmailDraft'),\n path('migrate_to_record', views.migrate_to_record, name='migrateToRecord'),\n path('clear_bounce/<int:client_pk>', views.clear_bounce, name='clearBounce'),\n path('sib_webhook', views.sib_webhook, name='sibWebhook'),\n\n path('email_missed_texts', views.email_missed_text_messages, name='textEmailMissedMessages'),\n path('save_text_template', views.save_text_template, name='textSaveTemplate'),\n path('get_text_template/<int:template_pk>', views.load_text_template, name='textGetTemplate'),\n path('upload_text_attachment', views.save_attachment, name='textSaveAttachment'),\n path('save_attached_picture/<int:service_pk>', views.save_attached_picture, name='textSavePicture'),\n path('all_texts', views.AllTextMessages.as_view(), name='textAllMessages'),\n path('text_by_phone', views.AllTextForNumber.as_view(), name='textAllMessagesByPhone'),\n path('all_text_read', views.mark_all_text_read, name='textMarkAllRead'), # IF YOU UPDATE THIS, UPDATE WCASYSTEM\n path('mark_main_mobile', views.mark_main_mobile, name='textMarkMainMobile'),\n path('mark_alt_mobile', views.mark_alt_mobile, name='textMarkAltMobile'),\n path('mark_other_mobile', views.mark_other_mobile, name='textMarkOtherMobile'),\n path('text_bulk', views.TextBulk.as_view(), name='textSendBulk'),\n\n path('text_migrate_client_record', views.migrate_text_client_record, name='textMigrateClientRecord'),\n]\n","repo_name":"varlenthegray/wcadmin","sub_path":"communication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33765530556","text":"import flet as ft\n\ndef main(page:ft.Page):\n def on_keyboard(e: ft.KeyboardEvent):\n page.add(\n ft.Text(\n f'Key: {e.key}, Shift: {e.shift}, Control: {e.ctrl}, Alt: {e.alt}, Meta: {e.meta}'\n )\n )\n \n page.on_keyboard_event = on_keyboard\n \n page.add(ft.Text('Presione cualquier tecla con una combinación (Control, Alt, Shift, Commmand)...'))\n 
\n\nft.app(target=main)","repo_name":"nelsongil/Flet-curso","sub_path":"s05_keyboard_shortcuts/demo_01_shortcuts.py","file_name":"demo_01_shortcuts.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71219880265","text":"\"\"\"User View tests.\"\"\"\n\n# run these tests like:\n#\n# FLASK_ENV=production python -m unittest test_message_views.py\n\nimport os\nfrom unittest import TestCase\n\nfrom models import db, connect_db, Message, User, Likes, Follows\n\n\n# BEFORE we import our app, let's set an environmental variable\n# to use a different database for tests (we need to do this\n# before we import our app, since that will have already\n# connected to the database\n\nos.environ['DATABASE_URL'] = \"postgresql:///warbler-test\"\n\n\n# Now we can import app\n\nfrom app import app, CURR_USER_KEY\n\n# Create our tables (we do this here, so we only create the tables\n# once for all tests --- in each test, we'll delete the data\n# and create fresh new clean test data\n\ndb.create_all()\n\n# Don't have WTForms use CSRF at all, since it's a pain to test\n\napp.config['WTF_CSRF_ENABLED'] = False\n\n\nclass UserViewTestCase(TestCase):\n \"\"\"Test views for Users.\"\"\"\n \n def setUp(self):\n \"\"\"Create test client, add sample data.\"\"\"\n\n db.drop_all()\n db.create_all()\n\n self.client = app.test_client()\n\n self.testuser = User.signup(username=\"testuser\",\n email=\"test@test.com\",\n password=\"testuser\",\n image_url=None)\n self.testuser_id = 9999\n self.testuser.id = self.testuser_id\n\n self.user1 = User.signup(\"one\", \n \"one@test.com\", \n \"password\", \n None)\n self.user1.id = 1111\n\n self.user2 = User.signup(\"two\", \n \"two@test.com\", \n \"password\", \n None)\n self.user2.id = 2222\n\n self.user3 = User.signup(\"three\", \n \"three@test.com\", \n \"password\", \n None)\n self.user3.id = 3333\n\n db.session.commit()\n\n\n def tearDown(self):\n \"\"\"Clean up any foul transaction.\"\"\"\n\n db.session.rollback()\n\n###############################################################################################\n# USER ROUTES\n\n def test_list_all_users(self):\n \"\"\"Route to list of users\"\"\"\n with self.client as client:\n res = client.get(\"/users\")\n\n self.assertIn(\"@testuser\", str(res.data))\n self.assertIn(\"@one\", str(res.data))\n self.assertIn(\"@two\", str(res.data))\n self.assertIn(\"@three\", str(res.data))\n \n def test_search_user(self):\n \"\"\"Search user route\"\"\"\n\n with self.client as client:\n res = client.get(\"/users?q=t\")\n\n self.assertIn(\"@testuser\", str(res.data))\n self.assertIn(\"@two\", str(res.data))\n self.assertIn(\"@three\", str(res.data))\n self.assertNotIn(\"@one\", str(res.data))\n\n def test_show_user(self):\n \"\"\"Show User\"\"\"\n with self.client as client:\n res = client.get(f\"/users/{self.testuser_id}\")\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"@testuser\", str(res.data))\n \n###############################################################################################\n# LIKE ROUTES\n\n def setup_likes(self):\n \"\"\"set up messages and likes to use in tests\"\"\"\n message1 = Message(text=\"I like pizza\", \n user_id=self.testuser_id)\n\n message2 = Message(id = 22222,\n text=\"I like burritos\", \n user_id=self.user1.id)\n \n db.session.add_all([message1, message2])\n db.session.commit()\n\n like = Likes(user_id=self.testuser_id, \n message_id=22222)\n\n db.session.add(like)\n 
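The flet demo earlier in this record wires every keystroke through `page.on_keyboard_event`; its Spanish prompt reads, in English, "Press any key with a combination (Control, Alt, Shift, Command)..." (the source misspells "Commmand"). The same hook can filter for a single shortcut; a sketch assuming flet is installed:

```python
# Variation on the flet handler above: react to one shortcut (Ctrl+S)
# instead of echoing every key. Run the same way as the original demo.
import flet as ft

def main(page: ft.Page):
    def on_keyboard(e: ft.KeyboardEvent):
        if e.key == "S" and e.ctrl:
            page.add(ft.Text("Ctrl+S pressed - saving..."))

    page.on_keyboard_event = on_keyboard
    page.add(ft.Text("Press Ctrl+S"))

ft.app(target=main)
```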
db.session.commit()\n\n def test_like_message(self):\n \"\"\"Add like to a message\"\"\"\n \n # create a message that will be liked\n message = Message(id = 11111,\n text = \"I like sandwiches\", \n user_id = self.testuser_id)\n\n db.session.add(message)\n db.session.commit()\n\n # create a user who will like the created message \n new_user = User.signup(\n username = 'new_user',\n email = \"test4@test.com\",\n password = \"HASHED_PASSWORD\",\n image_url = None\n )\n new_user.id = 999999\n\n db.session.add(new_user)\n db.session.commit()\n\n # set session to be logged in as the created user\n with self.client as client:\n with client.session_transaction() as session:\n session[CURR_USER_KEY] = 999999\n\n # make post request (like) for that created message\n res = client.post(\"/messages/11111/like\",\n follow_redirects=True)\n\n likes = Likes.query.filter(Likes.message_id==11111).all()\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(len(likes), 1)\n self.assertEqual(likes[0].user_id, 999999)\n\n \n def test_remove_like(self):\n \"\"\"Removing a like from a message\"\"\"\n self.setup_likes()\n\n # get the liked message \n message = Message.query.filter(Message.text==\"I like burritos\").one()\n self.assertIsNotNone(message)\n # should not be a message created by the logged in user\n self.assertNotEqual(message.user_id, self.testuser_id)\n\n # query the message which is liked by the logged in user\n like = Likes.query.filter(Likes.user_id==self.testuser_id and Likes.message_id==message.id).one()\n\n # check if there is a like\n self.assertIsNotNone(like)\n\n \n with self.client as client:\n with client.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser_id\n\n # we will make another post request to like that message so we can unlike it. \n res = client.post(f\"/messages/{message.id}/like\", \n follow_redirects=True)\n\n likes = Likes.query.filter(Likes.message_id==message.id).all()\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(len(likes), 0)\n\n\n\n def test_like_message_unauthorized_user(self):\n \"\"\"Making a like while not signed in\"\"\"\n self.setup_likes()\n\n # select a message\n message = Message.query.filter(Message.text==\"I like burritos\").one()\n self.assertIsNotNone(message)\n\n # count the number of likes there are\n like_count = Likes.query.count()\n\n # now try to make a post request to make a like to that selected message\n with self.client as client:\n res = client.post(f\"/messages/{message.id}/like\", \n follow_redirects=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(res.data))\n # access unauthorized since user is not logged in\n self.assertEqual(like_count, Likes.query.count())\n # number of likes should not change since user is unauthorized to make this request\n\n###############################################################################################\n# FOLLOWER/FOLLOWING ROUTES\n\n def setup_followers(self):\n \"\"\"Set up followers to use in tests\"\"\"\n\n # testuser follows user1\n follow1 = Follows(user_being_followed_id=self.user1.id, \n user_following_id=self. 
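The view tests above log a user in by writing `CURR_USER_KEY` straight into the Flask test client's session via `session_transaction()`, skipping the login route entirely. A minimal self-contained version of the trick (the toy app and the `curr_user` key are stand-ins, not warbler's):

```python
# Standalone version of the session-injection login used in these tests:
# client.session_transaction() writes the session cookie directly.
from flask import Flask, session

app = Flask(__name__)
app.secret_key = "test"

@app.route("/whoami")
def whoami():
    return str(session.get("curr_user", "anonymous"))

with app.test_client() as client:
    with client.session_transaction() as sess:
        sess["curr_user"] = 9999          # pretend user 9999 signed in
    print(client.get("/whoami").data)     # b'9999'
```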
testuser_id)\n\n # testuser follows user2\n follow2 = Follows(user_being_followed_id=self.user2.id, \n user_following_id=self.testuser_id)\n\n # user1 follows testuser\n follow3 = Follows(user_being_followed_id=self.testuser_id, \n user_following_id=self.user1.id)\n\n db.session.add_all([follow1, follow2, follow3])\n db.session.commit()\n\n\n def test_show_following(self):\n \"\"\"show user's following list\"\"\"\n\n self.setup_followers()\n\n with self.client as client:\n with client.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser_id\n\n res = client.get(f\"/users/{self.testuser_id}/following\")\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"@one\", str(res.data))\n self.assertNotIn(\"@three\", str(res.data)) \n\n\n def test_show_followers(self):\n \"\"\"show user's followers list\"\"\"\n\n self.setup_followers()\n\n with self.client as client:\n with client.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser_id\n\n res = client.get(f\"/users/{self.testuser_id}/followers\")\n\n # since user1 follows testuser, it should show in the response\n self.assertIn(\"@one\", str(res.data))\n self.assertNotIn(\"@two\", str(res.data))\n self.assertNotIn(\"@three\", str(res.data))\n\n\n def test_show_following_page__unauthorized(self):\n \"\"\"Users not logged in should not be able to see a user's following page\"\"\"\n \n self.setup_followers()\n \n with self.client as client:\n res = client.get(f\"/users/{self.testuser_id}/following\", \n follow_redirects=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(res.data))\n self.assertNotIn(\"@one\", str(res.data))\n\n\n def test_followers_page_unauthorized(self):\n \"\"\"Users not logged in should not be able to see a user's followers page\"\"\"\n \n self.setup_followers()\n \n with self.client as client:\n\n res = client.get(f\"/users/{self.testuser_id}/followers\", \n follow_redirects=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(res.data))\n self.assertNotIn(\"@one\", str(res.data))\n\n\n def test_add_follow(self):\n \"\"\"test user following another user\"\"\"\n\n with self.client as client:\n with client.session_transaction() as session:\n session[CURR_USER_KEY] = self.user2.id\n\n res = client.post(f\"/users/follow/{self.testuser_id}\", \n follow_redirects=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertNotIn(\"@one\", str(res.data))\n \n\n def test_remove_follow(self):\n \"\"\"test user unfollowing another user\"\"\"\n\n self.setup_followers()\n\n with self.client as client:\n with client.session_transaction() as session:\n session[CURR_USER_KEY] = self.user1.id\n\n res = client.post(f\"/users/stop-following/{self.testuser_id}\", \n follow_redirects=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertNotIn(\"@testuser\", str(res.data))\n\n###############################################################################################\n","repo_name":"thedvo/Twitter-Clone-Warbler-","sub_path":"test_user_views.py","file_name":"test_user_views.py","file_ext":"py","file_size_in_byte":11168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6200613577","text":"import csv\nimport pymongo\n\n\"\"\"\n@api {Function} createDic(url) createDic.py\n@apiName CreateDictionary\n@apiGroup Recommend Algorithm\n@apiParam {String} URL The current url of database.\n@apiDescription This components lets the createDic.py to get current 
song features to save as a csv file.\n\"\"\"\n\n\ndef save_dic(my_dic):\n try:\n with open('dict.csv', 'w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=csv_columns)\n writer.writeheader()\n writer.writerows(my_dic)\n except IOError:\n print(\"I/O error\")\n\n\ndef connect_db(col_name):\n client = pymongo.MongoClient(uri)\n my_db = client.jukebox\n my_col = my_db[col_name]\n return my_col.find()\n\n\ndef create_dic(collections):\n # zip the song name to int\n list_song_dict = [] # store the song as dict into the list\n for i in collections:\n # the song id and feature were stored as a dictionary\n tmp = {\"songID\": i['songID'], 'acousticness': i['acousticness'], 'danceability': i['danceability'],\n 'energy': i['energy'], 'instrumentalness': i['instrumentalness'], 'liveness': i['liveness'],\n 'loudness': i['loudness'], 'speechiness': i['speechiness'], 'valence': i['valence'],\n \"tempo\": i['tempo']}\n if tmp not in list_song_dict:\n list_song_dict.append(tmp)\n return list_song_dict\n\n\nuri = \"mongodb://public:bjssjukeboxgroup14@ds261253.mlab.com:61253/jukebox\"\ncsv_columns = [\"songID\", 'acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness',\n 'loudness', 'speechiness', 'valence', \"tempo\"]\n\n\ndef main():\n songs = connect_db(\"histories\")\n my_dicts = create_dic(songs)\n save_dic(my_dicts)\n\n\n\n","repo_name":"piercemorris/BJSSOfficeJukebox","sub_path":"recommandAlgorithm/createDic.py","file_name":"createDic.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"13019314265","text":"from __future__ import print_function\nimport sys\n\nimport ckan.lib.cli as cli\nimport ckan.plugins as p\nimport ckan.model as model\n\nimport ckanext.datastore.helpers as h\nfrom ckanext.xloader.command import XloaderCmd\n\n# Paster command for CKAN 2.8 and below\n\n\nclass xloaderCommand(cli.CkanCommand):\n '''xloader commands\n\n Usage:\n\n xloader submit [options] <dataset-spec>\n Submit the given datasets' resources to be xloaded into the\n DataStore. (They are added to the queue for CKAN's background task\n worker.)\n\n where <dataset-spec> is one of:\n\n <dataset-name> - Submit a particular dataset's resources\n\n <dataset-id> - Submit a particular dataset's resources\n\n all - Submit all datasets' resources to the DataStore\n\n all-existing - Re-submits all the resources already in the\n DataStore. (Ignores any resources that have not been stored\n in DataStore, e.g. 
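`create_dic` above deduplicates with `tmp not in list_song_dict`, which rescans the list for every document (O(n^2) overall). Keying a set on the row's item tuple keeps it to one pass; the two sample rows below are made-up stand-ins for Mongo documents:

```python
# One-pass deduplication instead of the list-membership scan in create_dic.
rows = [
    {"songID": "a", "tempo": 120.0},
    {"songID": "a", "tempo": 120.0},   # duplicate
]

seen = set()
unique = []
for row in rows:
    key = tuple(sorted(row.items()))
    if key not in seen:
        seen.add(key)
        unique.append(row)

print(unique)  # only one entry survives
```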
because they are not tabular)\n\n options:\n\n --dry-run - doesn't actually submit any resources\n\n --ignore-format - submit resources even if they have a format\n not in the configured ckanext.xloader.formats\n\n xloader status\n Shows status of jobs\n '''\n\n summary = __doc__.split('\\n')[0]\n usage = __doc__\n min_args = 1\n\n def __init__(self, name):\n super(xloaderCommand, self).__init__(name)\n self.error_occured = False\n\n self.parser.add_option('-y', dest='yes',\n action='store_true', default=False,\n help='Always answer yes to questions')\n self.parser.add_option('--ignore-format',\n action='store_true', default=False,\n help='Submit even if the resource.format is not'\n ' in ckanext.xloader.formats')\n self.parser.add_option('--dry-run',\n action='store_true', default=False,\n help='Don\\'t actually submit anything')\n\n def command(self):\n cmd = XloaderCmd(self.options.dry_run)\n if not self.args:\n print(self.usage)\n sys.exit(1)\n if self.args[0] == 'submit':\n if len(self.args) < 2:\n self.parser.error('This command requires an argument')\n if self.args[1] == 'all':\n self._load_config()\n cmd._setup_xloader_logger()\n cmd._submit_all()\n elif self.args[1] == 'all-existing':\n self._confirm_or_abort()\n self._load_config()\n cmd._setup_xloader_logger()\n cmd._submit_all_existing()\n else:\n pkg_name_or_id = self.args[1]\n self._load_config()\n cmd._setup_xloader_logger()\n cmd._submit_package(pkg_name_or_id)\n self._handle_command_status(cmd.error_occured)\n elif self.args[0] == 'status':\n self._load_config()\n cmd.print_status()\n else:\n self.parser.error('Unrecognized command')\n\n def _handle_command_status(self, error_occured):\n if error_occured:\n print('Finished but saw errors - see above for details')\n sys.exit(1)\n\n def _confirm_or_abort(self):\n if self.options.yes or self.options.dry_run:\n return\n question = (\n \"Data in any datastore resource that isn't in their source files \"\n \"(e.g. data added using the datastore API) will be permanently \"\n \"lost. 
Are you sure you want to proceed?\"\n )\n answer = cli.query_yes_no(question, default=None)\n if not answer == 'yes':\n print(\"Aborting...\")\n sys.exit(0)\n\n\nclass MigrateTypesCommand(cli.CkanCommand):\n '''Migrate command\n\n Turn existing resource field types into Data Dictionary overrides.\n This is intended to simplify migration from DataPusher to XLoader,\n by allowing you to reuse the types that DataPusher has guessed.\n\n Usage:\n\n migrate_types [options] [resource-spec]\n Add the given resources' field types to the Data Dictionary.\n\n where resource-spec is one of:\n\n <resource-id> - Migrate a particular resource\n\n all - Migrate all resources (this is the default)\n\n '''\n summary = __doc__.split('\\n')[0]\n usage = __doc__\n min_args = 0\n\n def __init__(self, name):\n super(MigrateTypesCommand, self).__init__(name)\n self.error_occured = False\n\n self.parser.add_option('-t', '--include-text',\n action='store_true', default=False,\n help='Add Data Dictionary overrides even for text fields')\n\n self.parser.add_option('--force',\n action='store_true', default=False,\n help='Overwrite existing data dictionary if it exists')\n\n def command(self):\n self._load_config()\n if not self.args or len(self.args) == 0 or self.args[0] == 'all':\n self._migrate_all()\n else:\n self._migrate_resource(self.args[0])\n self._handle_command_status()\n\n def _migrate_all(self):\n session = model.Session\n resource_count = session.query(model.Resource).filter_by(state='active').count()\n print(\"Updating {} resource(s)\".format(resource_count))\n resources_done = 0\n for resource in session.query(model.Resource).filter_by(state='active'):\n resources_done += 1\n self._migrate_resource(resource.id,\n prefix='[{}/{}]: '.format(resources_done,\n resource_count))\n if resources_done % 100 == 0:\n print(\"[{}/{}] done\".format(resources_done, resource_count))\n print(\"[{}/{}] done\".format(resources_done, resource_count))\n\n def _migrate_resource(self, resource_id, prefix=''):\n data_dict = h.datastore_dictionary(resource_id)\n\n def print_status(status):\n if self.options.verbose:\n print(\"{}{}: {}\".format(prefix, resource_id, status))\n\n if not data_dict:\n print_status(\"not found\")\n return\n\n fields = []\n for field in data_dict:\n if field['type'] == 'text' and not self.options.include_text:\n type_override = ''\n else:\n type_override = field['type']\n\n if 'info' not in field:\n field.update({'info': {'notes': '',\n 'type_override': type_override,\n 'label': ''}})\n elif self.options.force:\n field['info'].update({'type_override': type_override})\n else:\n print_status(\"skipped\")\n return\n\n fields.append({\n 'id': field['id'],\n 'type': field['type'],\n 'info': field['info']\n })\n\n try:\n p.toolkit.get_action('datastore_create')(None, {\n 'resource_id': resource_id,\n 'force': True,\n 'fields': fields\n })\n print_status(\"updated\")\n except Exception as e:\n self.error_occured = True\n print(\"{}: failed, {}\".format(resource_id, e))\n\n def _handle_command_status(self):\n if self.error_occured:\n print('Finished but saw errors - see above for details')\n sys.exit(1)\n","repo_name":"armphanu/ckanext-xloader-armphanu","sub_path":"ckanext/xloader/paster.py","file_name":"paster.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74886694666","text":"\"\"\"\r\nUpdating the Webservices AutoSupport configuration example.\r\n\r\nUsage:\r\n update_auto_support_configuration\r\n 
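The heart of `_migrate_resource` above is a pure data reshape: every guessed column type becomes a Data Dictionary `info.type_override`, with text columns blanked unless `--include-text` is passed. That part can be exercised without CKAN (the sample fields are invented):

```python
# Pure-Python check of the field reshaping done in _migrate_resource above.
def build_overrides(data_dict, include_text=False):
    fields = []
    for field in data_dict:
        if field["type"] == "text" and not include_text:
            type_override = ""           # text columns stay un-overridden
        else:
            type_override = field["type"]
        fields.append({
            "id": field["id"],
            "type": field["type"],
            "info": {"notes": "", "type_override": type_override, "label": ""},
        })
    return fields

sample = [{"id": "name", "type": "text"}, {"id": "count", "type": "numeric"}]
for f in build_overrides(sample):
    print(f["id"], repr(f["info"]["type_override"]))  # name '' / count 'numeric'
```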
update_auto_support_configuration -h\r\nOptions:\r\n -h --help Show this screen.\r\n --version Show version.\r\n\"\"\"\r\n\r\nimport docopt\r\nimport json\r\nimport logging\r\nimport requests\r\n\r\nfrom base import Properties, get_session\r\n\r\nPROPS = Properties()\r\nLOG = logging.getLogger(__name__)\r\n\r\ndef update_auto_support_configuration():\r\n # Get a connection\r\n connection = get_session()\r\n\r\n # Set the desired AutoSupport configuration\r\n data = {\r\n \"autoSupportEnabled\": True\r\n }\r\n\r\n try:\r\n LOG.info(\"Updating the AutoSupport configuration.\")\r\n result = connection.post('http://{server}/devmgr/v2/auto-support/configuration'.format(server=PROPS.server), data=json.dumps(data))\r\n try:\r\n result.raise_for_status()\r\n except requests.HTTPError as e:\r\n LOG.error(\"Update AutoSupport configuration attempt failed || http status code: %s\" % str(e.response.status_code))\r\n raise\r\n else:\r\n LOG.info(\"Update AutoSupport configuration attempt succeeded\")\r\n\r\n LOG.info(\"Retrieving current AutoSupport configuration\")\r\n result = connection.get('http://{server}/devmgr/v2/auto-support/configuration'.format(server=PROPS.server))\r\n try:\r\n result.raise_for_status()\r\n except requests.HTTPError as e:\r\n LOG.error(\"AutoSupport configuration retrieval attempt failed || http status code: %s\" % str(e.response.status_code))\r\n raise\r\n except Exception as e:\r\n LOG.error(\"Server connection failured\")\r\n raise\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(level=logging.DEBUG,\r\n format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\\n %(message)s')\r\n args = docopt.docopt(__doc__)\r\n update_auto_support_configuration()\r\n","repo_name":"NetApp/webservices-samples","sub_path":"auto_support/update_auto_support_configuration.py","file_name":"update_auto_support_configuration.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"10371851234","text":"# 参考官方做法\n\nclass Solution:\n def firstMissingPositive(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n\n length = len(nums)\n\n if 1 not in nums:\n return 1\n elif length == 1:\n # 如果长度是1而且有数字1,说明一定是[2]\n return 2\n\n # 将小于1的整数和大于length的数字设置为1\n for i in range(length):\n if nums[i] <= 0 or nums[i] > length:\n nums[i] = 1\n\n # 使用索引和数字符号作为检查器\n for i in range(length):\n # 如果发现了一个数字 a - 改变第 a 个元素的符号\n a = abs(nums[i])\n if a == length:\n # 由于没有下标 n ,使用下标 0 的元素保存是否存在数字 n。\n nums[0] = - abs(nums[0])\n else:\n # 当读到数字 a 时,替换第 a 个元素的符号。\n nums[a] = - abs(nums[a])\n\n # 第一个正数的下标就是第一个缺失的数\n for i in range(1, length):\n if nums[i] > 0:\n return i\n\n if nums[0] > 0:\n # 由于使用了第一个位置来表示是否第length个数字\n return length\n # 如果全都没有确实就返回length+1\n return length + 1\n","repo_name":"strawsyz/straw","sub_path":"ProgrammingQuestions/leetcode/41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"40964204496","text":"import requests\nfrom bs4 import BeautifulSoup\n\nwith open(\"sightseeing.txt\", \"r\", encoding=\"utf-8\") as f:\n data = f.readlines()\nf.close()\n\nfor url in data:\n url = url.replace(\"\\n\", \"\")\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n if soup.find(name=\"h1\", attrs={\"class\":\"media-heading\"}) == None:\n continue\n name = soup.find(name=\"h1\", attrs={\"class\":\"media-heading\"}).text\n name 
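The LeetCode 41 record above documents its in-place trick only in Chinese; in English: clamp non-positives and values greater than n to 1 (safe once 1 is known to be present), then mark "value a was seen" by negating `nums[a]`, with index 0 doubling as the slot for the value n itself. A compact restatement with an illustrative test input:

```python
# English restatement of the sign-marking idea in the record above
# (O(n) time, O(1) extra space); the test input is illustrative.
def first_missing_positive(nums):
    n = len(nums)
    if 1 not in nums:
        return 1
    for i in range(n):
        if nums[i] <= 0 or nums[i] > n:
            nums[i] = 1               # 1 is known present, safe filler
    for i in range(n):
        a = abs(nums[i])
        idx = 0 if a == n else a      # index 0 stands in for value n
        nums[idx] = -abs(nums[idx])   # negative sign means "a was seen"
    for i in range(1, n):
        if nums[i] > 0:
            return i
    return n if nums[0] > 0 else n + 1

print(first_missing_positive([3, 4, -1, 1]))  # 2
```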
= name.replace(\"旅行完全ガイド!おすすめ観光スポット・グルメ・お土産をチェック\", \"\")\n soup = soup.find_all(name=\"a\", attrs={\"class\":\"btn btn-item-link noext\"})\n datalist = []\n url_list = []\n for i in soup:\n datalist.append(i.get(\"href\"))\n url_list += (datalist[:4])\n url_list += (datalist[6:9])\n for i in url_list:\n response = requests.get(i)\n s = BeautifulSoup(response.text, \"html.parser\")\n title = s.find_all(name=\"h2\", attrs={\"class\":\"item-body\"})\n text = s.find_all(name=\"div\", attrs={\"class\":\"item-body-hbr\"})\n with open(f\"data_sightseeing/{name}_sightseeing.txt\", \"a\", encoding=\"utf-8\") as f:\n for j, k in zip(title, text):\n f.write(j.text + \"\\n\")\n f.write(k.text + \"\\n\")\n f.close()\n","repo_name":"Akira0809/hakata","sub_path":"sightseeing.py","file_name":"sightseeing.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71247857225","text":"import tkinter as tk\n\nclass Person:\n def __init__(self):\n self.fname = []\n self.lname = []\n self.idcode = []\n self.birthdate = []\n\n def read_fname(self):\n self.fname = input(\"Enter first name: \")\n\n def read_lname(self):\n self.lname = input(\"Enter last name: \")\n\n def read_idcode(self):\n self.idcode = input(\"Enter ID code: \")\n\n def read_birthdate(self):\n while True:\n try:\n year = int(input(\"Enter birth year: \"))\n if 1970 <= year <= 2010:\n self.birthdate = year\n break\n else:\n print(\"The birth year should be between 1970 and 2010.\")\n except ValueError:\n print(\"Invalid input. Please enter a valid birth year.\")\n\n def show_fullname(self):\n print(\"Full name:\", self.fname, self.lname)\n\n def show_idcode(self):\n print(\"ID code:\", self.idcode)\n\n def show_birthdate(self):\n print(\"Birthdate:\", self.birthdate)\n\n def show_birthdate(self):\n print(\"Birthdate:\", self.birthdate)\n\n\nclass Coach(Person):\n def __init__(self):\n super().__init__()\n self.card_type = []\n\n def read_card_and_ranking(self):\n valid_cards = [\"A\", \"B\", \"C\"]\n while True:\n card = input(\"Enter the coaching card type (A/B/C): \")\n if card.upper() in valid_cards:\n self.card_type = card.upper()\n break\n else:\n print(\"Invalid coaching card type. Please choose one of the following: A, B, C.\")\n\n self.team_ranking = int(input(\"Enter the team's ranking: \"))\n\n def show_card_type(self):\n print(\"Card type:\", self.card_type)\n \n def read_coach_info(self):\n self.read_fname()\n self.read_lname()\n self.read_birthdate()\n self.read_idcode()\n self.read_card_and_ranking()\n \n def show_caoch_info(self):\n super().show_fullname()\n super().show_idcode()\n super().show_birthdate()\n print(\"Card Type: \", self.card_type)\n print(\"ranking: \", self.team_ranking)\n\n\n\nclass Player(Person):\n def __init__(self):\n super().__init__()\n self.post = []\n self.goal = []\n self.height = []\n self.weight = []\n self.nationality = []\n self.goal = 0\n self.passes = 0\n self.scored_goal = False\n self.ball_taken = 0\n self.passes_given = 0\n self.clean_sheets = 0\n self.shot = 0\n self.is_foreign = False\n self.has_card = False\n self.num_cards = 0\n self.num_foul = 0\n \n\n \n def read_post(self):\n valid_modes = [\"attack\", \"midfielder\", \"defense\", \"goalkeeper\"]\n while True:\n mode = input(\"Enter player's position (attack/midfielder/defense/goalkeeper): \")\n if mode.lower() in valid_modes:\n self.post = mode.lower()\n break\n else:\n print(\"Invalid position. 
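sightseeing.py above pairs `h2.item-body` headings with `div.item-body-hbr` bodies via `zip`. The same selection against an inline snippet runs without any network access (the HTML and the spot names are made up):

```python
# Offline check of the h2/div pairing used by sightseeing.py above.
from bs4 import BeautifulSoup

html = """
<h2 class="item-body">Canal City</h2><div class="item-body-hbr">Shopping mall.</div>
<h2 class="item-body">Ohori Park</h2><div class="item-body-hbr">Lakeside park.</div>
"""
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all(name="h2", attrs={"class": "item-body"})
texts = soup.find_all(name="div", attrs={"class": "item-body-hbr"})
for t, x in zip(titles, texts):
    print(t.text, "-", x.text)
```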
Please choose one of the following: attack, midfielder, defense, goalkeeper.\")\n \n\n def read_stats(self):\n if self.post.lower() == \"attack\":\n self.goal = int(input(\"Enter the number of goals scored this season: \"))\n self.shot = int(input(\"Enter the number of successful shots this season: \"))\n\n elif self.post.lower() == \"midfielder\":\n self.passes = int(input(\"Enter the number of passes made this season: \"))\n self.shot = int(input(\"Enter the number of successful shots this season: \"))\n\n choice = input(\"Did the player score a goal this season? (y/n): \")\n if choice == 'y':\n self.goal= int(input('how many goals? '))\n\n choice = input(\"Did he make a foul on the opponent?? (y/n): \")\n if choice == 'y':\n self.num_foul= int(input('how many fouls? '))\n\n elif self.post.lower() == \"defense\":\n self.num_foul = int(input(\"Enter the number of times the opponent has been fouled \"))\n self.ball_taken = int(input(\"Enter the number of times the ball was taken from opponents: \"))\n self.passes_given = int(input(\"Enter the number of passes given to team mates: \"))\n card_option = input(\"Did the player receive a card this season? (y/n): \")\n if card_option == \"y\":\n self.has_card = True\n self.num_cards = int(input(\"Enter the number of cards received: \"))\n\n elif self.post.lower() == \"goalkeeper\":\n self.clean_sheets = int(input(\"Enter the number of clean sheets made this season: \"))\n \n \n\n def read_goal(self):\n self.goal = input('enter player goals')\n \n def read_height(self):\n while True:\n try:\n height = int(input(\"Enter height : \"))\n if 160 <= height <= 210:\n self.height = height\n break\n else:\n print(\"The height should be between 160 and 210.\")\n except ValueError:\n print(\"Invalid input.\")\n\n def read_weight(self):\n while True:\n try:\n weight = int(input(\"Enter weight : \"))\n if 50 <= weight <= 110:\n self.weight = weight\n break\n else:\n print(\"The weight should be between 50 and 110.\")\n except ValueError:\n print(\"Invalid input.\")\n\n def read_nationality(self):\n self.nationality = input(\"Enter player's nationality: \")\n foreign_option = input(\"Is the player a foreign player? 
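`read_birthdate`, `read_height` and `read_weight` above all repeat the same `while`/`try`/`int(input(...))` loop with different bounds. A single range-checked reader would cover all three; a sketch (the prompt strings are examples):

```python
# One range-checked reader replacing the repeated validation loops above.
def read_int_in_range(prompt, lo, hi):
    while True:
        try:
            value = int(input(prompt))
        except ValueError:
            print("Invalid input.")
            continue
        if lo <= value <= hi:
            return value
        print(f"Value must be between {lo} and {hi}.")

# e.g. height = read_int_in_range("Enter height: ", 160, 210)
```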
(y/n): \")\n if foreign_option.lower() == \"y\":\n self.is_foreign = True\n\n\n\n def read_palyer_info(self):\n self.read_fname()\n self.read_lname()\n self.read_idcode()\n self.read_birthdate()\n self.read_height()\n self.read_weight()\n self.read_nationality()\n self.read_post()\n self.read_stats()\n \n def show_height(self):\n print(self.height)\n\n\n def show_player_info(self):\n super().show_fullname()\n super().show_idcode()\n super().show_birthdate()\n print(\"post:\", self.post)\n print(\"Height:\", self.height)\n print(\"Weight:\", self.weight)\n print(\"Nationality:\", self.nationality)\n if self.post.lower() == \"attack\":\n print('goal count ', self.goal)\n print('shot count ', self.shot)\n elif self.post.lower() == 'midfielder':\n print('pass count', self.passes)\n print('shot count', self.shot)\n print('goal count' , self.goal)\n print('Foul count' , self.num_foul)\n elif self.post.lower() == 'defense':\n print('ball_taken', self.ball_taken)\n print('passes_given', self.passes_given)\n print('has_card', self.has_card)\n print('num card', self.num_cards)\n elif self.post.lower() == 'goalkeeper':\n print('clean_sheets' , self.clean_sheets)\n \n # elif self == self.team.captain:\n # print('Captain')\n \n\n\nclass Team:\n MAX_PLAYERS_PER_TEAM = 11\n\n def __init__(self):\n self.team_name = []\n self.team_code = []\n self.players = []\n self.coach = Coach()\n\n def read_team_name(self):\n self.team_name = input(\"Enter team name: \")\n\n def read_team_code(self):\n self.team_code = input(\"Enter team code: \")\n\n def show_team_name(self):\n print(\"Team name: \", self.team_name)\n\n def show_team_code(self):\n print(\"Team code: \", self.team_code)\n\n def read_team_info(self):\n self.read_team_name()\n self.read_team_code()\n print(\"---------------------------\")\n print(\"Coach Info:\")\n self.coach.read_coach_info()\n print(\"---------------------------\")\n num_players = int(input(\"Enter the number of players in the team: \"))\n for i in range(num_players):\n print(\"Player\", i+1, \"Info:\")\n player = Player()\n player.read_palyer_info()\n self.players.append(player)\n print(\"---------------------------\")\n\n print(\"---------------------------\")\n print(\"Choose Captain:\")\n self.choose_captain()\n\n\n def choose_captain(self):\n if self.players:\n print(\"Select the captain from the following players:\")\n for i, player in enumerate(self.players):\n print(f\"{i+1}. {player.fname} {player.lname}\")\n\n choice = int(input(\"Enter the number of the chosen captain: \"))\n\n if 1 <= choice <= len(self.players):\n captain = self.players[choice - 1]\n print('--------------------------------------')\n print(f\"{captain.fname} {captain.lname} is the captain of {self.team_name}.\")\n print('--------------------------------------')\n return captain\n else:\n print(\"Please try again there is no captain.\")\n else:\n print(\"No players in the team. Cannot choose a captain.\")\n\n return None \n \n # if num_players > 11:\n # choice = input(\"Do you want to add a reserve player? 
(y/n): \")\n # if choice.lower() == \"y\":\n # print(\"Reserve Player Info:\")\n # reserve_player = Player()\n # reserve_player.read_player_info()\n # self.players.append(reserve_player)\n # print(\"---------------------------\")\n # else:\n # break\n\n def show_team_info(self):\n print(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\")\n self.show_team_name()\n self.show_team_code()\n print(\"\\n---------------------------\")\n print(\"Coach Info:\")\n self.coach.show_caoch_info()\n print(\"\\n---------------------------\")\n for i, player in enumerate(self.players):\n print(\"\\nPlayer\", i+1, \"Info:\")\n player.show_player_info()\n print(\"\\n---------------------------\")\n\n\n\n\n # def transfer_player(self, player, new_team):\n # if player in self.players:\n # if len(new_team.players) < MAX_PLAYERS_PER_TEAM:\n # self.players.remove(player)\n # new_team.players.append(player)\n # player.team = new_team\n # print(f\"Player {player.fname} {player.lname} transferred from {self.team_name} to {new_team.team_name}.\")\n # return True\n # else:\n # print(f\"The new team {new_team.team_name} has reached the maximum number of players.\")\n # else:\n # print(f\"Player {player.fname} {player.lname} is not in {self.team_name}.\")\n\n # return False\n \n\n def player_statistics(self):\n for i, player in enumerate(self.players):\n print(\"\\nPlayer\", i+1, \"Info:\")\n player.show_player_info(self)\n print(\"\\n---------------------------\")\n\n def get_foreign_players_count(self):\n foreign_players_count = 0\n for player in self.players:\n if player.is_foreign:\n foreign_players_count += 1\n return foreign_players_count\n\n\nclass League:\n def __init__(self):\n self.teams = []\n self.player = Player()\n\n def read_team_info(self):\n team = Team()\n team.read_team_info()\n self.teams.append(team)\n\n def display_all_teams(self):\n for team in self.teams:\n team.show_team_info()\n \n def display_coaches_with_teams(self):\n for team in self.teams:\n print(f\"Team: {team.team_name}\")\n team.coach.show_caoch_info()\n print(\"---------------------------\")\n\n def display_team_by_code(self):\n code = input(\"Enter team's code: \")\n found = False\n for team in self.teams:\n if team.team_code == code:\n team.show_team_info()\n found = True\n break\n if not found:\n print(\"No team found with that code.\")\n\n\n def display_team_by_coach(self):\n id_code = input(\"Enter coach's ID code: \")\n found = False\n for team in self.teams:\n if team.coach.idcode == id_code:\n team.show_team_name()\n found = True\n break\n if not found:\n print(\"No coach found with that ID code.\")\n\n\n def display_team_by_player(self):\n id_code = input(\"Enter player's ID code: \")\n found = False\n for team in self.teams:\n for player in team.players:\n if player.idcode == id_code:\n team.show_team_name()\n found = True\n break\n if found:\n break\n if not found:\n print(\"No player found with that ID code.\")\n\n\n def display_players_by_name(self):\n for team in self.teams:\n for player in team.players:\n if player.fname.lower() == \"ali\":\n print(player.fname, player.lname, \"(\", team.team_name, \")\")\n print(\"---------------------\")\n print(\"*************************\")\n\n\n def display_players_over_30(self):\n current_year = 2023\n for team in self.teams:\n for Player in team.players:\n birth_year = int(Player.birthdate)\n age = current_year - birth_year\n if age > 30:\n print(Player.show_fullname(), \"(\", age, \"years old )\")\n print(\"---------------------\")\n \n def display_players_height(self):\n for team in 
self.teams:\n for player in team.players:\n print(player.show_fullname(), \"(\", player.height, \"CM)\")\n\n\n def get_team_with_most_foreign_players(self):\n team_with_most_foreign_players = None\n max_foreign_players_count = 0\n\n for team in self.teams:\n foreign_players_count = team.get_foreign_players_count()\n\n if foreign_players_count > max_foreign_players_count:\n max_foreign_players_count = foreign_players_count\n team_with_most_foreign_players = team\n\n return team_with_most_foreign_players\n\n def get_player_with_most_goals(self):\n player_with_most_goals = None\n max_goals = 0\n\n for team in self.teams:\n for player in team.players:\n if player.goal > max_goals:\n max_goals = player.goal\n player_with_most_goals = player\n\n return player_with_most_goals\n \n def get_player_with_most_passes(self):\n player_with_most_passes = None\n max_passes = 0\n\n for team in self.teams:\n for player in team.players:\n if player.passes > max_passes:\n max_passes = player.passes\n player_with_most_passes = player\n\n return player_with_most_passes\n \n def get_player_with_most_shots_on_goal(self):\n player_with_most_shots = None\n max_shots = 0\n\n for team in self.teams:\n for player in team.players:\n if player.shot > max_shots:\n max_shots = player.shot\n player_with_most_shots = player\n\n return player_with_most_shots\n \n \n def get_goalkeeper_with_most_clean_sheets(self):\n goalkeeper_with_most_clean_sheets = None\n max_clean_sheets = 0\n\n for team in self.teams:\n for player in team.players:\n if isinstance(player) and player.clean_sheets > max_clean_sheets:\n max_clean_sheets = player.clean_sheets\n goalkeeper_with_most_clean_sheets = player\n\n return goalkeeper_with_most_clean_sheets\n\n def get_team_with_most_fouls(self):\n max_fouls = 0\n fouling_team = None\n\n for team in self.teams:\n total_fouls = 0\n for player in team.players:\n total_fouls += player.fouls\n\n if total_fouls > max_fouls:\n max_fouls = total_fouls\n fouling_team = team\n\n return fouling_team\n\n # def get_players_nationality(self):\n # nationalities = []\n # for player in self.players:\n # nationalities.append(player.nationality)\n # return nationalities\n\nclass TeamInfoGUI:\n def __init__(self):\n self.window = tk.Tk()\n self.window.title(\"Team Information\")\n\n self.teams = []\n self.coaches = []\n self.players = []\n\n self.team_name_label = tk.Label(self.window, text=\"Team Name:\")\n self.team_name_entry = tk.Entry(self.window)\n self.team_code_label = tk.Label(self.window, text=\"Team Code:\")\n self.team_code_entry = tk.Entry(self.window)\n\n self.coach_name_label = tk.Label(self.window, text=\"Coach Name:\")\n self.coach_name_entry = tk.Entry(self.window)\n self.coach_last_name_label = tk.Label(self.window, text=\"Coach Last Name:\")\n self.coach_last_name_entry = tk.Entry(self.window)\n self.coach_dob_label = tk.Label(self.window, text=\"Date of Birth:\")\n self.coach_dob_entry = tk.Entry(self.window)\n self.coach_id_label = tk.Label(self.window, text=\"ID Code:\")\n self.coach_id_entry = tk.Entry(self.window)\n self.coach_card_label = tk.Label(self.window, text=\"Card Type:\")\n self.coach_card_entry = tk.Entry(self.window)\n self.coach_rank_label = tk.Label(self.window, text=\"Team Rank:\")\n self.coach_rank_entry = tk.Entry(self.window)\n\n self.player_name_label = tk.Label(self.window, text=\"Player Name:\")\n self.player_name_entry = tk.Entry(self.window)\n self.player_last_name_label = tk.Label(self.window, text=\"Player Last Name:\")\n self.player_last_name_entry = 
tk.Entry(self.window)\n self.player_dob_label = tk.Label(self.window, text=\"Date of Birth:\")\n self.player_dob_entry = tk.Entry(self.window)\n self.player_id_label = tk.Label(self.window, text=\"ID Code:\")\n self.player_id_entry = tk.Entry(self.window)\n self.player_position_label = tk.Label(self.window, text=\"Player Position:\")\n self.player_position_entry = tk.Entry(self.window)\n\n self.get_team_info_button = tk.Button(self.window, text=\"Get Team Info\", command=self.show_team_info)\n self.get_coach_info_button = tk.Button(self.window, text=\"Get Coach Info\", command=self.show_coach_info)\n self.get_player_info_button = tk.Button(self.window, text=\"Get Player Info\", command=self.show_player_info)\n\n self.info_label = tk.Label(self.window, text=\"\")\n\n\n\n self.team_name_label.grid(row=0, column=0, sticky=tk.E)\n self.team_name_entry.grid(row=0, column=1)\n self.team_code_label.grid(row=1, column=0, sticky=tk.E)\n self.team_code_entry.grid(row=1, column=1)\n\n self.coach_name_label.grid(row=0, column=2, sticky=tk.E)\n self.coach_name_entry.grid(row=0, column=3)\n self.coach_last_name_label.grid(row=1, column=2, sticky=tk.E)\n self.coach_last_name_entry.grid(row=1, column=3)\n self.coach_dob_label.grid(row=2, column=2, sticky=tk.E)\n self.coach_dob_entry.grid(row=2, column=3)\n self.coach_id_label.grid(row=3, column=2, sticky=tk.E)\n self.coach_id_entry.grid(row=3, column=3)\n self.coach_card_label.grid(row=4, column=2, sticky=tk.E)\n self.coach_card_entry.grid(row=4, column=3)\n self.coach_rank_label.grid(row=5, column=2, sticky=tk.E)\n self.coach_rank_entry.grid(row=5, column=3)\n\n self.player_name_label.grid(row=0, column=4, sticky=tk.E)\n self.player_name_entry.grid(row=0, column=5)\n self.player_last_name_label.grid(row=1, column=4, sticky=tk.E)\n self.player_last_name_entry.grid(row=1, column=5)\n self.player_dob_label.grid(row=2, column=4, sticky=tk.E)\n self.player_dob_entry.grid(row=2, column=5)\n self.player_id_label.grid(row=3, column=4, sticky=tk.E)\n self.player_id_entry.grid(row=3, column=5)\n self.player_position_label.grid(row=4, column=4, sticky=tk.E)\n self.player_position_entry.grid(row=4, column=5)\n\n self.get_team_info_button.grid(row=6, column=0, columnspan=2)\n self.get_coach_info_button.grid(row=6, column=2, columnspan=2)\n self.get_player_info_button.grid(row=6, column=4, columnspan=2)\n\n self.info_label.grid(row=7, column=0, columnspan=6)\n\n self.display_all_teams_button = tk.Button(self.window, text=\"Display All Teams\", command=self.display_all_teams)\n self.display_team_by_code_button = tk.Button(self.window, text=\"Display Team by Code\", command=self.display_team_by_code)\n self.display_team_by_coach_button = tk.Button(self.window, text=\"Display Team by Coach\", command=self.display_team_by_coach)\n self.display_team_by_player_button = tk.Button(self.window, text=\"Display Team by Player\", command=self.display_team_by_player)\n self.display_players_by_name_button = tk.Button(self.window, text=\"Display Players by Name\", command=self.display_players_by_name)\n self.display_players_over_30_button = tk.Button(self.window, text=\"Display Players Over 30\", command=self.display_players_over_30)\n self.display_coaches_with_teams_button = tk.Button(self.window, text=\"Display Coaches with Teams\", command=self.display_coaches_with_teams)\n self.display_players_height_button = tk.Button(self.window, text=\"Display Players Height\", command=self.display_players_height)\n\n self.display_all_teams_button.grid(row=8, column=0, columnspan=2)\n 
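`TeamInfoGUI` above positions everything with `grid(row=..., column=..., sticky=tk.E)`. The layout idea in isolation: right-aligned labels in column 0, entries in column 1, and a button spanning both columns:

```python
# Minimal version of the grid() layout used by TeamInfoGUI above.
import tkinter as tk

root = tk.Tk()
tk.Label(root, text="Team Name:").grid(row=0, column=0, sticky=tk.E)
name_entry = tk.Entry(root)
name_entry.grid(row=0, column=1)
tk.Label(root, text="Team Code:").grid(row=1, column=0, sticky=tk.E)
code_entry = tk.Entry(root)
code_entry.grid(row=1, column=1)
tk.Button(root, text="Save", command=lambda: print(name_entry.get())).grid(
    row=2, column=0, columnspan=2)
root.mainloop()
```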
self.display_team_by_code_button.grid(row=8, column=2, columnspan=2)\n self.display_team_by_coach_button.grid(row=8, column=4, columnspan=2)\n self.display_team_by_player_button.grid(row=9, column=0, columnspan=2)\n self.display_players_by_name_button.grid(row=9, column=2, columnspan=2)\n self.display_players_over_30_button.grid(row=9, column=4, columnspan=2)\n self.display_coaches_with_teams_button.grid(row=10, column=0, columnspan=2)\n self.display_players_height_button.grid(row=10, column=2, columnspan=2)\n\n def show_team_info(self):\n team_name = self.team_name_entry.get()\n team_code = self.team_code_entry.get()\n\n team_info = f\"Team Name: {team_name}\\nTeam Code: {team_code}\"\n self.teams.append(team_info)\n\n self.team_name_entry.delete(0, tk.END)\n self.team_code_entry.delete(0, tk.END)\n\n self.info_label.config(text=\"Team information saved.\")\n\n def show_coach_info(self):\n coach_name = self.coach_name_entry.get()\n coach_last_name = self.coach_last_name_entry.get()\n coach_dob = self.coach_dob_entry.get()\n coach_id = self.coach_id_entry.get()\n coach_card = self.coach_card_entry.get()\n coach_rank = self.coach_rank_entry.get()\n\n coach_info = f\"Name: {coach_name}\\nLast Name: {coach_last_name}\\nDOB: {coach_dob}\\nID: {coach_id}\\nCard: {coach_card}\\nRank: {coach_rank}\"\n self.coaches.append(coach_info)\n\n self.coach_name_entry.delete(0, tk.END)\n self.coach_last_name_entry.delete(0, tk.END)\n self.coach_dob_entry.delete(0, tk.END)\n self.coach_id_entry.delete(0, tk.END)\n self.coach_card_entry.delete(0, tk.END)\n self.coach_rank_entry.delete(0, tk.END)\n\n self.info_label.config(text=\"Coach information saved.\")\n\n def show_player_info(self):\n player_name = self.player_name_entry.get()\n player_last_name = self.player_last_name_entry.get()\n player_dob = self.player_dob_entry.get()\n player_id = self.player_id_entry.get()\n player_position = self.player_position_entry.get()\n info = f\"Player Name: {player_name}\\nLast Name: {player_last_name}\\nDate of Birth: {player_dob}\\nID Code: {player_id}\\nPosition: {player_position}\"\n self.info_label.config(text=info)\n\n def save_player_info(self):\n player_name = self.player_name_entry.get()\n player_last_name = self.player_last_name_entry.get()\n player_dob = self.player_dob_entry.get()\n player_id = self.player_id_entry.get()\n player_position = self.player_position_entry.get()\n\n player_info = f\"Name: {player_name}\\nLast Name: {player_last_name}\\nDOB: {player_dob}\\nID: {player_id}\\nPosition: {player_position}\"\n self.players.append(player_info)\n\n self.player_name_entry.delete(0, tk.END)\n self.player_last_name_entry.delete(0, tk.END)\n self.player_dob_entry.delete(0, tk.END)\n self.player_id_entry.delete(0, tk.END)\n self.player_position_entry.delete(0, tk.END)\n\n self.info_label.config(text=\"Player information saved.\")\n\n def display_all_teams(self):\n if self.teams:\n team_info = \"\\n\".join(self.teams)\n self.info_label.config(text=team_info)\n else:\n self.info_label.config(text=\"No team information available.\")\n self.info_label.config(text=team_info)\n \n\n def display_team_by_code(self):\n team_code = self.team_code_entry.get()\n team_info = [team for team in self.teams if team_code in team]\n if team_info:\n team_info_str = \"\\n\".join(team_info)\n self.info_label.config(text=team_info_str)\n else:\n self.info_label.config(text=\"No team found with the provided code.\")\n\n def display_team_by_coach(self):\n coach_name = self.coach_name_entry.get()\n coach_info = [coach for coach in 
self.coaches if coach_name in coach]\n if coach_info:\n coach_info_str = \"\\n\".join(coach_info)\n self.info_label.config(text=coach_info_str)\n else:\n self.info_label.config(text=\"No coach found with the provided name.\")\n\n def display_team_by_player(self):\n player_name = self.player_name_entry.get()\n player_info = [player for player in self.players if player_name in player]\n if player_info:\n player_info_str = \"\\n\".join(player_info)\n self.info_label.config(text=player_info_str)\n else:\n self.info_label.config(text=\"No player found with the provided name.\")\n\n def display_players_by_name(self):\n if self.players:\n sorted_players = sorted(self.players)\n player_info = \"\\n\".join(sorted_players)\n self.info_label.config(text=player_info)\n else:\n self.info_label.config(text=\"No player information available.\")\n\n def display_players_over_30(self):\n player_info = [player for player in self.players if \"DOB: \" in player and int(player.split(\"DOB: \")[-1]) > 30]\n if player_info:\n player_info_str = \"\\n\".join(player_info)\n self.info_label.config(text=player_info_str)\n else:\n self.info_label.config(text=\"No players over 30 found.\")\n\n def display_coaches_with_teams(self):\n if self.coaches:\n coach_info = \"\\n\".join(self.coaches)\n self.info_label.config(text=coach_info)\n else:\n self.info_label.config(text=\"No coach information available.\")\n\n def display_players_height(self):\n player_info = [player for player in self.players if \"Height: \" in player]\n if player_info:\n player_info_str = \"\\n\".join(player_info)\n self.info_label.config(text=player_info_str)\n else:\n self.info_label.config(text=\"No player height information available.\")\n\n\n\n def run(self):\n self.window.mainloop()\n\nif __name__ == \"__main__\":\n app = TeamInfoGUI()\n app.run()\n\n\n","repo_name":"farzzad01/Football-team-management","sub_path":"teams.py","file_name":"teams.py","file_ext":"py","file_size_in_byte":27971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"22559886331","text":"from database_setup import Base, Restaurant, MenuItem\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('sqlite:///restaurantmenu.db')\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\nmyFirstRestaurant = Restaurant(name=\"pizza Palace\")\n\n\ncheesepizza = MenuItem(\n name=\"Cheese Pizza\",\n description=\"Made with all natural ingredient and fresh mozzarella\",\n course=\"Entree\",\n price='$8.99',\n restaurant=myFirstRestaurant\n)\n\n# session.add(cheesepizza)\n# session.add(myFirstRestaurant)\n# session.commit()\nfirstResult = session.query(Restaurant).first()\nMenuquery = session.query(MenuItem)\n\nitems = session.query(MenuItem).all()\n\nfor item in items:\n print(item.name)\n print(item.name + \" \" + item.description + \" \" + item.price)\n\nBase.metadata.bind = engine\n","repo_name":"coloed3/Crud-BaseHTTP","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9607307591","text":"# -*- conding:utf-8 -*-\n#Author:lyc\nimport os, sys\n\n# 题目:\n# 给定一个整数数组 nums ,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和。\n#\n# 示例:\n# 输入: [-2,1,-3,4,-1,2,1,-5,4],\n# 输出: 6\n# 解释: 连续子数组 [4,-1,2,1] 的和最大,为 6。\n#\n# 进阶:\n# 如果你已经实现复杂度为 O(n) 的解法,尝试使用更为精妙的分治法求解。\n\nclass Solution:\n def maxSubArray(self, nums: [int]) -> int:\n \"\"\"\n 动态规划解法\n :param 
nums:\n :return:\n \"\"\"\n # 如果输入为空,直接返回\n if not nums:return 0\n result = [0 for i in range(len(nums))]\n result[0] = nums[0]\n maxV = nums[0]\n for i in range(1,len(nums)):\n result[i] = max(result[i-1]+nums[i],nums[i])\n maxV = max(result[i],maxV)\n return maxV\n\n\n\nresult = Solution()\nnums = [-2,1,-3,4,-1,2,1,-5,4]\nprint(result.maxSubArray(nums))\n","repo_name":"g-lyc/LeetCode","sub_path":"Python/t53.py","file_name":"t53.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"25064472700","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass TransformerLayer(nn.Module):\r\n def __init__(self, hidden_size, num_heads, dropout_rate):\r\n super(TransformerLayer, self).__init__()\r\n self.self_attention = nn.MultiheadAttention(hidden_size, num_heads, dropout=dropout_rate)\r\n self.feed_forward = nn.Sequential(\r\n nn.Linear(hidden_size, 4 * hidden_size),\r\n nn.ReLU(),\r\n nn.Linear(4 * hidden_size, hidden_size)\r\n )\r\n self.dropout = nn.Dropout(dropout_rate)\r\n self.layer_norm = nn.LayerNorm(hidden_size)\r\n\r\n def forward(self, inputs):\r\n attention_output, _ = self.self_attention(inputs, inputs, inputs)\r\n attention_output = self.dropout(attention_output)\r\n attention_output = self.layer_norm(inputs + attention_output)\r\n\r\n ff_output = self.feed_forward(attention_output)\r\n ff_output = self.dropout(ff_output)\r\n output = self.layer_norm(attention_output + ff_output)\r\n return output\r\nclass Transformer(nn.Module):\r\n def __init__(self, hidden_size, num_layers, num_heads, dropout_rate):\r\n super(Transformer, self).__init__()\r\n self.embedding = nn.Embedding(vocab_size, hidden_size)\r\n self.transformer_layers = nn.ModuleList([\r\n TransformerLayer(hidden_size, num_heads, dropout_rate) for _ in range(num_layers)\r\n ])\r\n\r\n def forward(self, inputs):\r\n embeddings = self.embedding(inputs)\r\n outputs = embeddings\r\n\r\n for layer in self.transformer_layers:\r\n outputs = layer(outputs)\r\n\r\n return outputs\r\nhidden_size = 512\r\nnum_layers = 6\r\nnum_heads = 8\r\ndropout_rate = 0.1\r\nvocab_size = 1000\r\n\r\nmodel = Transformer(hidden_size, num_layers, num_heads, dropout_rate)\r\ninputs = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]) # 输入数据的形状为 [batch_size, sequence_length]\r\noutputs = model(inputs)\r\nprint(\"输入是{},输出是{}\\n\".format(inputs,outputs))\r\nprint(\"输入的shape是{},输出的shape是{}\".format(inputs.shape,outputs.shape))\r\n","repo_name":"ArcFYB/fiko_storage","sub_path":"transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43068039962","text":"import json\nimport sys\n\nresult = []\nwith open(sys.argv[1]) as json_data:\n data = json.load(json_data)\n for i in data:\n result.append([i['location']['latitude'], i['location']['longitude']])\n\nprint(result)","repo_name":"theriley106/Mobey","sub_path":"extractLongLat.py","file_name":"extractLongLat.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29950967983","text":"import argparse\n\n\"\"\"\nTest rig\n\nGoals:\n- iterate through a long list of datasets/methods/hyperparams\n- Save everything that might be useful - this is partially exploratory, i.e. 
looking for\ndifferences between batch and minibatch GD\n- Maybe have a 'run' class? That saves per-iteration values and has plot methods for easily comparing\nruns over different measures\n\n\nThings to save:\n - Experiment spec object, which should contain all data needed to reproduce the experiment\n - Git commit hash and diff\n - label = subprocess.check_output([\"git\", \"describe\", \"--always\"]).strip()\n - subprocess.call([\"git diff > \" + os.path.join(log_dir, filename_git_diff)], shell=True)\n - Versions of all dependencies??\n - pip freeze > requirements.txt\n\n\nExperiment spec:\n - Dataset\n - String identifier (name)\n - Method\n - String identifier?\n - Hyperparams\n - Random seed\n\n\nWorking with the experiment spec:\n - DatasetLoader class\n - A big `if/elif/else` statement\n - \n\nOn the server:\n - When you run a command in an SSH session, if you close the session the command will die. So you need to run it using\n something like `tmux`, or `screen`. Look into the details\n - Do we need to worry about not hogging 100% CPU? Can we limit our CPU usage? Do we need to?\n - Might want to chat to Scott Rose about this?\n\n\"\"\"\n\n# python test_rig.py --spec_list lots_of_specs.yaml\n\n\ndef run_experiment(experiment_spec):\n results_dir = create_results_directory()\n save_spec(experiment_spec, results_dir)\n save_git_commit_and_diff(results_dir)\n set_seed(experiment_spec.random_seed)\n\n dataset = _load_dataset(experiment_spec.dataset_name)\n method = _init_method(experiment_spec.method_name, experiment_spec.hyperparams)\n\n for i in range(experiment_spec.num_iterations):\n method.run_iteration(dataset)\n log_stuff(method, results_dir)\n\n\ndef _load_dataset(self, name):\n if name == 'thingy':\n \"\"\"Dataset URL: \"\"\"\n do_stuff('~/data/sajhdaksjdh/')\n elif name == 'other_thing':\n do_other_stuff()\n\n\ndef _init_method(self, name, hyperparams):\n return Method(hyperparams)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('spec_list', type=str)\nargs = parser.parse_args()\n\nexperiment_specs = parse_list_of_specs(args.spec_list)\n\nfor spec in experiment_specs:\n run_experiment(spec)\n","repo_name":"BenGutteridge/4YP","sub_path":"March/test_rig.py","file_name":"test_rig.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41513825210","text":"import os\nimport unittest\nfrom ..repair import repair_html_entities, repair_broken_unicode\n\n\nclass TestRepairHtmlEntities(unittest.TestCase):\n def test_single(self):\n source = 'Test me'\n result = repair_html_entities(source)\n\n self.assertEqual('Test me', result)\n\n def test_double(self):\n source = 'Test&lt;me'\n result = repair_html_entities(source)\n\n self.assertEqual('Test<me', result)\n\n\nclass TestRepairBrokenUnicode(unittest.TestCase):\n def test_encoding(self):\n source = '✔ No problems'\n result = repair_broken_unicode(source)\n\n self.assertEqual('✔ No problems', result)\n\n def test_width(self):\n source = 'LOUD NOISES'\n result = repair_broken_unicode(source)\n\n self.assertEqual('LOUD NOISES', result)\n","repo_name":"shkarupa-alex/nlpclean","sub_path":"nlpclean/tests/test_repair.py","file_name":"test_repair.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74158458184","text":"import torch\nimport torch.fx\nfrom torch.fx.node import Node\n\nfrom typing import Dict\n\nclass ShapeProp:\n def 
__init__(self, mod):\n self.mod = mod\n self.graph = mod.graph\n self.modules = dict(self.mod.named_modules())\n\n def propagate(self, *args):\n args_iter = iter(args)\n env : Dict[str, Node] = {}\n\n def load_arg(a):\n return torch.fx.node.map_arg(a, lambda n: env[n.name])\n\n def fetch_attr(target : str):\n target_atoms = target.split('.')\n attr_itr = self.mod\n for i, atom in enumerate(target_atoms):\n if not hasattr(attr_itr, atom):\n raise RuntimeError(f\"Node referenced nonexistant target {'.'.join(target_atoms[:i])}\")\n attr_itr = getattr(attr_itr, atom)\n return attr_itr\n\n for node in self.graph.nodes:\n if node.op == 'placeholder':\n result = next(args_iter)\n elif node.op == 'get_attr':\n result = fetch_attr(node.target)\n elif node.op == 'call_function':\n result = node.target(*load_arg(node.args), **load_arg(node.kwargs))\n elif node.op == 'call_method':\n self_obj, *args = load_arg(node.args)\n kwargs = load_arg(node.kwargs)\n result = getattr(self_obj, node.target)(*args, **kwargs)\n elif node.op == 'call_module':\n result = self.modules[node.target](*load_arg(node.args), **load_arg(node.kwargs))\n elif node.op == 'output':\n return load_arg(node.args[0])\n\n if isinstance(result, torch.Tensor):\n node.shape = result.shape\n node.dtype = result.dtype\n\n env[node.name] = result\n\n return None\n","repo_name":"K-Wu/pytorch-direct","sub_path":"torch/fx/experimental/shape_prop.py","file_name":"shape_prop.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"2786713057","text":"from datetime import datetime\n\n\nclass Category:\n def __init__(self, category):\n self.category = category\n\nclass Products:\n def __init__(self, name, price, category):\n self.name = name\n self.price = price\n self.category = category\n\n\nclass Stock:\n def __init__(self, products: Products, amount):\n self.products = products\n self.amount = amount\n\n\nclass Sale:\n def __init__(self, unit_sold: Products, seller, buyer, sold_amount, date = datetime.now().strftime(\"%d/%m/%Y\")):\n self.unit_sold = unit_sold\n self.seller = seller\n self.buyer = buyer\n self.sold_amount = sold_amount\n self.date = date\n\n\nclass Provider:\n def __init__(self, name, cnpj, phone, category):\n self.name = name\n self.cnpj = cnpj\n self.phone = phone\n self.category = category\n\n\nclass People:\n def __init__(self, name, phone, cpf, email, address ):\n self.name = name\n self.phone = phone\n self.cpf = cpf\n self.email = email\n self.address = address\n\nclass Employee(People):\n def __init__(self, name, phone, cpf, email, address, clt):\n self.clt = clt\n super().__init__(name, phone, cpf, email, address)","repo_name":"GuttoRegadas/MVC_grocery_store","sub_path":"Models.py","file_name":"Models.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38691103300","text":"import pandas as pd\nimport category_encoders\nfrom category_encoders import TargetEncoder\nimport random\nimport warnings\nimport sys\n\nwarnings.filterwarnings(\"ignore\")\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\n\n#decision tree \nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\n\n#random forest \nfrom sklearn.ensemble import 
RandomForestClassifier\nfrom sklearn.preprocessing import StandardScaler\n\n#bagging\nfrom numpy import mean\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.ensemble import BaggingClassifier\n\n''' \nBelow are the constants to modify before running the model! \n'''\nMODELS=['rf', 'dt', 'bagging'] #expects a list, rf - Random Forest, dt - Decision Tree, bagging - Bagging \nDEPTH = 5 #depth for Decision Tree Classifier \nSAMPLE_SIZE = 2500\nNUM_HIGH = 1250\nNUM_LOW = 1250\nBALANCED = True #boolean, whether we want to balance the distribution of high/low classes\n\n#The below are the ways to handle the categorical features - they should be mutually exclusive \nUSE_ONE_HOT_ENCODING = False\nUSE_TARGET_ENCODING = False\n\nOUTPUT_FILE = \"./all_output_optimizing_extra\" \nMETADATA = './all_data_final_all_features.csv'\n\n''' \nImportant Note: When editing the feature variables below, if you add features to the ALL_FEATURES list, add the feature to the NONBOOL_COLS l \nist if it is categorical and the BOOL_COLS if it is boolean. These lists are used for one-hot encoding and translating True/False to 0/1 values for our model. There's no need t\\\no remove items from NONBOOL_COLS or BOOL_COLS -- we'll only check for items that are specified as features in the model. \n'''\nNONBOOL_COLS = [] #'platform' \nBOOL_COLS = [\"summer_fire_season\", 'santa_ana_fire_season']\n\nCOLUMNS = ['fire_occurrence','acres_burned','avg_dp_temp','avg_rel_hum','avg_wb_temp','avg_wind_speed','precip','pop_density','latitude','longitude', \"summer_fire_season\", 'santa_ana_fire_season']\nALL_FEATURES = ['avg_dp_temp','avg_rel_hum','avg_wb_temp','avg_wind_speed','precip','pop_density','latitude','longitude', \"summer_fire_season\", 'santa_ana_fire_season']\n#constants for model \nLABEL_COL_NAME = 'fire_occurrence'\nNUM_ESTIMATORS = [20, 50, 100, 200, 300, 400]\nMAX_DEPTH = [10, 20, 30]\nMIN_SAMPLES_SPLIT = [2, 5, 10]\nMIN_SAMPLES_LEAF = [1, 2, 5]\nRANDOM_SEED = 21\n\ndef get_nonbool(subset):\n res = []\n for feature in subset:\n if feature in NONBOOL_COLS:\n res.append(feature)\n return res\n\ndef get_bool(subset):\n res = []\n for feature in subset:\n if feature in BOOL_COLS:\n res.append(feature)\n return res\n\ndef generate_model (models, depth, features, sample_size, balanced, output_file):\n orig_stdout = sys.stdout\n f = open(output_file, \"w\")\n sys.stdout = f\n\n data = pd.read_csv(METADATA, sep=',', dtype={'county':str,'fire_occurrence':int, 'acres_burned': int, 'avg_dp_temp': float, 'avg_rel_hum': float, 'avg_wb_temp': float, 'avg_wind_speed': float, 'precip': float, 'pop_density': float, 'latitude':float, 'longitude':float, \"summer_fire_season\": int, 'santa_ana_fire_season': int}, header=0)\n \n #IF BALANCED, balance high and low classes so that each are 1/2 the number of samples indicated; otherwise just sample the number indicated \n if balanced:\n low = data[data['fire_occurrence'] == 0]\n low = low[['key']]\n high = data[data['fire_occurrence'] == 1]\n high = high[['key']]\n \n dates_high = list(set(high['key']))\n dates_low = list(set(low['key']))\n random.seed(RANDOM_SEED)\n try:\n random_high = random.sample(dates_high, NUM_HIGH)\n print(\"num_high\", len(random_high))\n random_low = random.sample(dates_low, NUM_LOW)\n print(\"num_low\", len(random_low))\n\n data_high = data[data['key'].isin(random_high)]\n data_low = data[data['key'].isin(random_low)]\n frames = [data_high, 
data_low]\n final_df = pd.concat(frames)\n except:\n print(\"Data not large enough to fit desired sample size. Using full dataset instead...\")\n final_df = data\n else:\n try:\n accessions = list(set(data['key']))\n random.seed(RANDOM_SEED)\n random_dates= random.sample(accessions, sample_size)\n final_df = data[data['key'].isin(random_dates)]\n except:\n print(\"Data not large enough to fit desired sample size. Using full dataset instead...\")\n final_df = data\n category_labels = final_df[LABEL_COL_NAME]\n category_binary = [1 if i == 1 else 0 for i in category_labels]\n final_df[LABEL_COL_NAME] = category_binary\n\n final_df.index = final_df['key']\n final_df = final_df[ALL_FEATURES]\n\n final_df = final_df.fillna(0)\n X_train, X_test, y_train, y_test = train_test_split(final_df, category_binary, test_size=0.3, stratify=category_binary)\n\n for num_estimators in NUM_ESTIMATORS:\n for max_depth in MAX_DEPTH:\n for min_split in MIN_SAMPLES_SPLIT:\n for min_leaf in MIN_SAMPLES_LEAF:\n print(\"num_estimators\", num_estimators, \"max_depth\", max_depth, \"min_split\", min_split, \"min_leaf\", min_leaf)\n rfc = RandomForestClassifier(n_estimators=num_estimators, max_depth=max_depth, min_samples_split = min_split, min_samples_leaf = min_leaf, random_state=RANDOM_SEED)\n rfc.fit(X_train, y_train)\n y_pred = rfc.predict(X_test)\n\n scaler = StandardScaler()\n X_train_sc = scaler.fit_transform(X_train)\n X_test_sc = scaler.transform(X_test)\n\n rfc = RandomForestClassifier(n_estimators=num_estimators, max_depth=max_depth, min_samples_split = min_split, min_samples_leaf = min_leaf, random_state=RANDOM_SEED, class_weight=\"balanced\")\n rfc.fit(X_train_sc, y_train)\n y_pred_sc = rfc.predict(X_test_sc)\n rfc.score(X_test_sc, y_test)\n\n # again we leverage the metrics from sci-kit learn \n print('Accuracy and performance of Random Forests on non-normalized data with 20 estimators:')\n print(accuracy_score(y_test, y_pred))\n print(confusion_matrix(y_test,y_pred))\n print(classification_report(y_test,y_pred))\n \n sys.stdout = orig_stdout\n f.close()\n\ndef main():\n generate_model(MODELS, DEPTH, ALL_FEATURES, SAMPLE_SIZE, BALANCED, OUTPUT_FILE)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n","repo_name":"michellegan/ca_wildfire_prediction","sub_path":"model/model_optimize.py","file_name":"model_optimize.py","file_ext":"py","file_size_in_byte":8459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18573537671","text":"def Union(A,B):\n C=[]\n for a in A:\n C.append(a)\n for b in B:\n C.append(b)\n return C\n \ndef Interseccion(A,B):\n C=[]\n for a in A:\n for b in B:\n if(a==b):\n C.append(a)\n return C\n\ndef Diferencia(A,B):\n C=[]\n flag=False\n for a in A:\n for b in B:\n if(a==b):\n flag=True\n if(flag==False):\n C.append(a)\n flag=False\n return C\n\ndef DiferenciaSim(A,B):\n return Union(Diferencia(A,B),Diferencia(B,A))\n\ndef ImprimirConjunto(A):\n B=[]\n for a in A:\n if a not in B:\n print(a)\n B.append(a)\n\nif __name__==\"__main__\":\n A=[]\n B=[]\n loop=True\n\n for i in range(5):\n A.append(int(input(\"digite un numero para el conjuto A\")))\n for i in range(5):\n B.append(int(input(\"digite un numero para el conjuto B\")))\n while(loop==True):\n print(A)\n print(B)\n print(\" - Que desea hacer - \")\n print(\"1. Union\")\n print(\"2. Interseccion\")\n print(\"3. Diferencia\")\n print(\"4. Diferencia Simetrica\")\n print(\"0. 
Salir\")\n x=int(input(\"Indique el numero\"))\n if(x==1):\n print(\"conjunto resultante\")\n ImprimirConjunto(Union(A,B))\n if(x==2):\n print(\"conjunto resultante\")\n ImprimirConjunto(Interseccion(A,B))\n if(x==3):\n print(\"conjunto resultante\")\n ImprimirConjunto(Diferencia(A,B))\n if(x==4):\n print(\"conjunto resultante\")\n ImprimirConjunto(DiferenciaSim(A,B))\n if(x==0):\n loop=False\n\n","repo_name":"Vess13/mc1-angel-chaparro","sub_path":"Taller1.py","file_name":"Taller1.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11980358336","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport pandas_datareader as data\r\nfrom keras.models import load_model\r\nimport streamlit as st\r\nimport datetime as dt\r\n\r\nStarting = '2009-01-01'\r\nEnding = dt.datetime.now()\r\n\r\nst.title('Stock Trend Prediction')\r\n\r\nuser_input = st.text_input('Enter Stock Ticker', 'MSFT')\r\ndf = data.DataReader(user_input, 'yahoo', Starting, Ending)\r\n\r\n\r\nst.subheader('Data of last 10 days')\r\nst.write(df.tail(10))\r\n\r\n\r\nst.subheader('Price vs Time')\r\nfig = plt.figure(figsize = (12,6))\r\nplt.plot(df.Close)\r\nst.pyplot(fig)\r\n\r\n\r\n\r\ntraining=pd.DataFrame(df['Close'][0:int(len(df)*0.80)])\r\ntesting=pd.DataFrame(df['Close'][int(len(df)*0.80):int(len(df))])\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nscaler=MinMaxScaler(feature_range=(0,1))\r\n\r\ntraining_array=scaler.fit_transform(training)\r\n\r\npred_days=100\r\n\r\n\r\n\r\nx_train=[]\r\ny_train=[]\r\n\r\nfor i in range(pred_days,training_array.shape[0]):\r\n x_train.append(training_array[i-pred_days:i])\r\n y_train.append(training_array[i,0])\r\n \r\nx_train,y_train=np.array(x_train),np.array(y_train)\r\n\r\n\r\n\r\nmodel=load_model('C:/Users/Subhojit/Desktop/project/keras_model.h5')\r\n\r\npast100Days_data = training.tail(100)\r\nfinal = past100Days_data.append(testing, ignore_index=True)\r\nInput = scaler.fit_transform(final)\r\n\r\nx_test = []\r\ny_test = []\r\n\r\nfor i in range(pred_days, Input.shape[0]):\r\n x_test.append(Input[i-pred_days: i])\r\n y_test.append(Input[i, 0])\r\n \r\nx_test, y_test = np.array(x_test), np.array(y_test)\r\n\r\ny_predicted = model.predict(x_test)\r\n\r\nscaler=scaler.scale_\r\n\r\nscale_factor = 1/scaler[0]\r\ny_predicted = y_predicted*scale_factor\r\ny_test = y_test * scale_factor\r\n\r\n\r\nst.subheader('Predicted Value')\r\nfig_pred=plt.figure(figsize=(12,6))\r\nplt.plot(y_test, 'b', label = 'Original Price')\r\nplt.plot(y_predicted, 'r', label = 'Predicted Price')\r\nplt.xlabel('Time')\r\nplt.ylabel('Price')\r\nplt.legend()\r\nst.pyplot(fig_pred)\r\n","repo_name":"Subhojitpaul559/Stock_market_preds","sub_path":"project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14053823790","text":"INPUTPATH = \"input.txt\"\n#INPUTPATH = \"input-test.txt\"\nwith open(INPUTPATH) as ifile:\n raw = ifile.read()\nheader, *bodies = raw.strip().split(\"\\n\\n\")\n\nfrom typing import NamedTuple, cast\nfrom collections.abc import Iterable\nclass Win(NamedTuple):\n turn: int\n score: int\nclass Board:\n unmarked: set[int]\n axes: tuple[set[int], ...]\n def __init__(self, grid: Iterable[Iterable[int]]):\n rows = tuple(tuple(row) for row in grid)\n cols: tuple[tuple[int, ...], ...] 
= tuple(map(tuple, zip(*rows)))\n        self.axes = tuple(map(set, rows + cols))\n        self.unmarked = set(n for a in self.axes for n in a)\n    def mark(self, number: int) -> bool:\n        self.unmarked.discard(number)\n        for a in self.axes:\n            a.discard(number)\n            if not a:\n                return True\n        return False\n    def play(self, draws: Iterable[int]) -> Win | None:\n        for i, n in enumerate(draws):\n            if self.mark(n):\n                return Win(i + 1, sum(self.unmarked) * n)\n        return None\n\ndraws = tuple(map(int, header.split(\",\")))\nresults = tuple(\n    Board(map(int, r.strip().split()) for r in body.strip().split(\"\\n\")).play(draws)\n    for body in bodies\n)\nassert all(isinstance(r, Win) for r in results)\nwins = cast(Iterable[Win], results)\nprint(min(wins).score)\nprint(max(wins).score)\n","repo_name":"Floozutter/aoc-2021-python","sub_path":"day04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"5531801568","text":"import pandas as pd\nimport json\nimport boto3\nimport csv\nimport io\n\ns3Client = boto3.client('s3')\n\ndef lambda_handler(event, context):\n    \n    # Get Bucket and file name\n    bucket = event['Records'][0]['s3']['bucket']['name']\n    key = event['Records'][0]['s3']['object']['key']\n    \n    print(bucket)\n    print(key)\n    \n    # Get Object\n    response = s3Client.get_object(Bucket=bucket, Key=key)\n    \n    teachers = ['cucinella', 'asaro']\n    file_path = \"/content/sample_data/preferenze.csv\" \n    max_students_per_class = 20 \n    \n    # Run the algorithm\n    students = generate_student_dictionary(response)\n\n    # Remove companion preferences that are not in the list of students\n    for student in students:\n        if student['companion_preference'] not in [s['name'] for s in students]:\n            student['companion_preference'] = ''\n    \n    # Assign classes\n    class_assignments = assign_classes(students, teachers, max_students_per_class)\n    \n    # TODO: emit a real CSV; serialize as JSON for now so put_object receives a string\n    modified_csv = json.dumps(class_assignments)\n    \n    s3Client.put_object(Bucket=bucket, Key=key, Body=modified_csv) \n\n    # Generate download URL for edited CSV\n    url = s3Client.generate_presigned_url(\n        'get_object',\n        Params={'Bucket': bucket, 'Key': key},\n        ExpiresIn=3600  # link expiration\n    )\n\n    # Return the URL as the response\n    return {\n        'statusCode': 200,\n        'body': json.dumps({'download_url': url})\n    }\n\ndef generate_student_dictionary(response):\n    students = []\n    \n    data_response = response['Body'].read().decode('utf-8')\n    data_response_file = io.StringIO(data_response)\n    \n    # Read the CSV file\n    data = pd.read_csv(data_response_file, delimiter=',')\n    \n    for _, row in data.iterrows():\n        # Ignore rows with missing or invalid values in 'professore' column\n        if pd.notna(row['professore']):\n            student = {\n                'name': row['studente'],\n                'teacher_preferences': [row['professore']],\n                'companion_preference': row['compagno']\n            }\n            students.append(student)\n\n    return students\n\ndef assign_classes(students, teachers, max_students_per_class):\n    class_assignments = {teacher: {1: [], 2: []} for teacher in teachers}\n    companions = {student['name']: student['companion_preference'] for student in students}\n\n    single_preference_students = [student for student in students if len(student['teacher_preferences']) == 1]\n    other_students = [student for student in students if len(student['teacher_preferences']) == 0 and student['companion_preference'] == '']\n\n    # Assign students with single preference\n    for student in single_preference_students:\n        teacher = 
student['teacher_preferences'][0]\n class_num = 1 if len(class_assignments[teacher][1]) < max_students_per_class else 2\n class_assignments[teacher][class_num].append(student['name'])\n if student['companion_preference'] in companions and isinstance(companions[student['companion_preference']], list):\n companions[student['companion_preference']].remove(student['name'])\n\n # Assign students without preferences\n for student in other_students:\n teacher = min(teachers, key=lambda teacher: sum(len(class_assignments[teacher][i]) for i in [1, 2]))\n class_num = 1 if len(class_assignments[teacher][1]) < max_students_per_class else 2\n class_assignments[teacher][class_num].append(student['name'])\n\n # Assign remaining students without preferences or companions\n for teacher in teachers:\n for class_num in [1, 2]:\n while len(class_assignments[teacher][class_num]) < max_students_per_class and students:\n student = students.pop(0)\n class_assignments[teacher][class_num].append(student['name'])\n\n return class_assignments\n","repo_name":"lucadigangi/generateClass","sub_path":"function/lambdaFunction.py","file_name":"lambdaFunction.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73276782665","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom time import time\r\n\r\nx = np.zeros(10000)#vetor para guardar as variaveis geradas\r\ninv = np.zeros(10000)\r\nsoma = 0 #variavel para ajudar na hora de retirar a média\r\nsomatorio = 0 #variavel para ajudar na hora da variancia \r\nλ = 3\r\na = 3\r\nc = 0.1\r\nx[0] = time() #seed\r\nm = 2**64\r\n\r\n\r\n\r\nfor i in range(1, 10000): # repetição \r\n x[i] = (((a * x[i-1]) + c) % m) #formula para gerar as variaveis aleatorias\r\n \r\nx /= (m-1)\r\n\r\nfor i in range(10000):\r\n inv[i] = (-1/λ) * np.log(1 - x[i]) #CDF inversa\r\n\r\n\r\nfor i in range(10000):\r\n soma = inv[i] + soma\r\n\r\nmedia = soma/10000 #media amostral\r\n\r\n\r\nfor i in range(10000):\r\n somatorio = somatorio + (inv[i] - media) ** 2\r\n \r\nvariancia = somatorio/10000 #variancia amostral\r\n\r\nplt.hist(inv)#histograma\r\nplt.show()#histograma\r\n\r\naux, bin = np.histogram(inv)#Cdf\r\npdf = aux / sum(aux)#Cdf\r\ncdf = np.cumsum(pdf)#Cdf\r\nplt.plot(bin[1:], cdf)#Cdf\r\nplt.show()#Cdf\r\n\r\n\r\nmediat = 1/λ #media teorica\r\nvarianciat = 1/λ ** 2 # variancia teorica\r\n\r\nprint(f'Media Teorica = {mediat}')\r\nprint(f'Variancia Teorica = {varianciat}') \r\n\r\nprint(f'media = {media}')\r\nprint(f'variancia = {variancia}')","repo_name":"Glayber16/trab-estatistica","sub_path":"quest 2.py","file_name":"quest 2.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22214035541","text":"import sqlite3\nimport pprint\n\ndef get_user(username):\n conn = sqlite3.connect('Bookstore.db')\n c = conn.cursor()\n keys = ['username', 'email', 'is_admin', 'password']\n c.execute(f\"SELECT \" + ', '.join(keys) + f\" FROM user WHERE username='{username}'\")\n user = c.fetchone()\n conn.close()\n if user == None:\n return None\n user = dict(zip(keys, user))\n user['is_authenticated'] = False\n user['is_active'] = True\n user['is_anonymous'] = False\n user['get_id'] = lambda: user['username']\n\n return user\n\ndef prnt(s):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(s)\n\n# Test 
get_user\nprnt(get_user('admin'))","repo_name":"AminKln/Bookstore","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72582518024","text":"# -*- coding: utf-8 -*-\n\"\"\"\n#功能:通通量化投资开发环境全局变量\n#版本:Ver1.00\n#设计人:独狼荷蒲\n#电话:18578755056\n#QQ:2775205\n#百度:荷蒲指标\n#开始设计日期: 2018-07-08\n#公众号:独狼股票分析\n#使用者请同意最后<版权声明>\n#最后修改日期:2018年9月14日\n#主程序:HP_main.py\n\"\"\"\n\nimport platform\nfrom HP_global import *\nimport pandas as pd\n\n# 软件名称\nG_root = None\nG_name = '通通证券分析研究平台'\nG_title = '通通证券分析研究平台'\nG_ico = '.\\omr.ico'\nG_winW = 1280\nG_winH = 850\nG_ver = 1.00\nG_user = '13311968726'\nG_passwd = 'ftp123'\nG_login = False\nG_tk = 'import tkinter as tk'\nG_tk1 = 'from tkinter import *'\nG_tk2 = 'from tkinter import ttk'\nG_os = 1\n\nG_pyver = int(platform.python_version()[0:1])\n\n###########################################\n# 软件参数\nG_gtype = 3\nG_stock = '000001.XSHE'\nG_df = None\nG_sday = '2018-01-01'\nG_eday = '2018-09-08'\nG_index = 'KDJ'\nG_MA1 = 5\nG_MA2 = 10\nG_MA3 = 20\nG_MA4 = 60\nG_MA5 = 120\nG_MA6 = 240\nG_MAV1 = 5\nG_MAV2 = 10\n\n\n########################################\n# 操作系统类型\ndef UseOS():\n sysstr = platform.system()\n if (sysstr == \"Windows\" or sysstr == \"windows\"):\n return 1\n elif (sysstr == \"Linux\"):\n return 2\n else:\n return 3\n\n\ndef HP_init():\n # 软件名称\n G_name = '聚宽证券分析研究平台'\n G_title = '聚宽证券分析研究平台'\n G_ver = 1.00\n G_login = False\n G_tk = 'import tkinter as tk'\n G_tk1 = 'from tkinter import *'\n G_tk2 = 'from tkinter import ttk'\n G_pyver = int(platform.python_version()[0:1])\n G_os = UseOS()\n if G_pyver == 3:\n G_tk = 'import tkinter as tk'\n G_tk1 = 'from tkinter import *'\n G_tk2 = 'from tkinter import ttk'\n else:\n G_tk = 'import Tkinter as tk'\n G_tk1 = 'from Tkinter import *'\n G_tk2 = 'from Tkinter import ttk'\n\n\n# 通用平均线计算\ndef G_MA(Series, n):\n G_pyver = int(platform.python_version()[0:1])\n G_ma = None\n if G_pyver == 2:\n G_MAstr = 'pd.rolling_mean(Series,n)'\n G_ma = eval(G_MAstr)\n else:\n G_MAstr = 'Series.rolling(window=n,center=False).mean()'\n G_ma = eval(G_MAstr)\n return G_ma\n\n\n#####################################################\n################独狼荷蒲软件版权声明###################\n'''\n独狼荷蒲软件(或通通软件)版权声明\n1、独狼荷蒲软件(或通通软件)均为软件作者设计,或开源软件改进而来,仅供学习和研究使用,不得用于任何商业用途。\n2、用户必须明白,请用户在使用前必须详细阅读并遵守软件作者的“使用许可协议”。\n3、作者不承担用户因使用这些软件对自己和他人造成任何形式的损失或伤害。\n4、作者拥有核心算法的版权,未经明确许可,任何人不得非法复制;不得盗版。作者对其自行开发的或和他人共同开发的所有内容,\n 包括设计、布局结构、服务等拥有全部知识产权。没有作者的明确许可,任何人不得作全部或部分复制或仿造。\n\n独狼荷蒲软件\nQQ: 2775205\nTel: 18578755056\n公众号:独狼��票分析\n'''\n","repo_name":"tianhm/ttquant","sub_path":"HP_set.py","file_name":"HP_set.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"41841701419","text":"import random\n\nimport PIL.Image\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom deelemma.datasets.mnist import MnistDataset, Mnist\nfrom deelemma.utils.generic import easy_conv\n\n\ndef save_asl():\n asl_train = 'data/asl_alphabet_db/asl_alphabet_train/asl_alphabet_train/'\n # asl_test = 'data/asl_alphabet_db/asl_alphabet_test/asl_alphabet_test/'\n\n fig, axs = plt.subplots(3, 3)\n fig.tight_layout()\n for i in range(3):\n for j in range(3):\n let = ['a', 's', 'l'][j]\n n = random.randint(0, 3000)\n path = asl_train + f'{let.upper()}/{let.upper()}{n}.jpg'\n axs[i, j].imshow(mpimg.imread(path))\n\n 
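# write the 3x3 grid of randomly sampled ASL images to disk as an SVG\n    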
plt.savefig('out/misc/asl_kaggle.svg', bbox_inches='tight')\n plt.clf()\n\n\ndef save_mnist():\n mnist = Mnist(MnistDataset.TF_KERAS)\n\n fig, axs = plt.subplots(1, 5)\n fig.tight_layout()\n plt.subplots_adjust(left=0.1,\n bottom=0.1,\n right=0.9,\n top=0.9,\n wspace=0.4,\n hspace=0)\n for j in range(5):\n array = mnist.get_train_images()[j].reshape((28, 28))\n fign = axs[j].imshow(array)\n axs[j].set_axis_off()\n fign.set_cmap('binary')\n\n plt.savefig('out/misc/mnist.svg', bbox_inches='tight')\n plt.clf()\n\n\ndef save_dog4():\n path = 'data/misc/dog.jpg'\n image = PIL.Image.open(path).convert('RGBA')\n array = np.array(image) # noqa\n\n fig, ((axs0, axs1), (axs2, axs3)) = plt.subplots(2, 2)\n fig.tight_layout()\n\n axs0.imshow(array)\n axs1.imshow(array.mean(axis=2, keepdims=True))\n fig2 = axs2.imshow(array.mean(axis=2, keepdims=True))\n fig2.set_cmap('gray')\n fig3 = axs3.imshow(array.mean(axis=2, keepdims=True))\n fig3.set_cmap('binary')\n\n plt.savefig('out/misc/dog_0.svg', bbox_inches='tight')\n plt.clf()\n\n\ndef save_conv():\n path = 'data/misc/dog.jpg'\n image = PIL.Image.open(path).convert('RGBA')\n _input = np.array(image) # noqa\n\n fig, axs = plt.subplots(2, 2)\n plt.subplots_adjust(left=0.1,\n bottom=0.06,\n right=0.9,\n top=0.94,\n wspace=0.3,\n hspace=0.3)\n\n axs[0, 0].title.set_text('Original')\n axs[0, 0].imshow(_input)\n\n kernel = [[1, 4, 6, 4, 1],\n [4, 16, 24, 16, 4],\n [6, 24, 36, 24, 6],\n [4, 16, 24, 16, 4],\n [1, 4, 6, 4, 1]]\n _output = easy_conv(_input, kernel)\n axs[0, 1].title.set_text('Gaussian Blur')\n fign = axs[0, 1].imshow(_output)\n fign.set_cmap('gray')\n\n kernel = [[1, 0, -1],\n [1, 0, -1],\n [1, 0, -1]]\n _output = easy_conv(_input, kernel)\n axs[1, 0].title.set_text('Vertical edges')\n fign = axs[1, 0].imshow(_output)\n fign.set_cmap('gray')\n\n kernel = [[1, 1, 1],\n [0, 0, 0],\n [-1, -1, -1]]\n _output = easy_conv(_input, kernel)\n axs[1, 1].title.set_text('Horizontal edges')\n fign = axs[1, 1].imshow(_output)\n fign.set_cmap('gray')\n\n plt.savefig('out/misc/dog_conv.svg', bbox_inches='tight')\n plt.clf()\n\n\ndef save_fake_pooling():\n path = 'data/misc/dog.jpg'\n image = PIL.Image.open(path).convert('L')\n\n fig, axs = plt.subplots(2, 3)\n axs = axs.reshape(6)\n fig.tight_layout()\n plt.subplots_adjust(left=0.1,\n bottom=0.1,\n right=0.9,\n top=0.9,\n wspace=0.1,\n hspace=0)\n\n def pool_n(n):\n size = 400//(2**n)\n axs[n].title.set_text(f'{size}x{size}')\n axs[n].set_axis_off()\n fign = axs[n].imshow(np.array(image.resize((size, size)))) # noqa\n fign.set_cmap('gray')\n\n for i in range(6):\n pool_n(i)\n\n plt.savefig('out/misc/dog_pool.svg', bbox_inches='tight')\n plt.clf()\n\n\nif __name__ == '__main__':\n pass\n # save_mnist()\n # save_asl()\n # save_conv()\n # save_fake_pooling()\n","repo_name":"eggonz/deelemma","sub_path":"examples/print_samples.py","file_name":"print_samples.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"86751859752","text":"import torch\nimport torch.nn as nn\nimport os\nimport datetime\nfrom src.domain import Domain\n\nfrom src.struct import Params\nfrom src.pinn import PINN\nfrom src.utils import get_device\nfrom src.func import Exact\nfrom src.plot import save_anim, save_loss_plot, save_solution_plot, save_initial_plot\nfrom src.train import train_pinn_from_params\nfrom src.io import *\n\nimport matplotlib as mpl\nmpl.use(\"Agg\")\n\n# params_bc = ['zero', 'reflective']\n# params_bc_abbr = [\"dir\", 
\"neu\"]\n\nparams_bc = ['zero']\nparams_bc_abbr = [\"dir\"]\n\nparams_a = [0.5, 1.0, 2.0]\nparams_a_abbr = [\"05\", \"1\", \"2\"]\n\nparams_c = [0.3, 0.5, 1.0, 2.0, 3.0]\nparams_c_abbr = [\"03\", \"05\", \"1\", \"2\", \"3\"]\n\nparams_phi = [2.0, 4.0, 6.0]\nparams_phi_abbr = [\"2\", \"4\", \"6\"]\n\ndevice = get_device()\nparamss = []\nresults = []\ni = 1\nall = 45\nfor bc, bc_abbr in zip(params_bc, params_bc_abbr):\n for a, a_abbr in zip(params_a, params_a_abbr):\n for c, c_abbr in zip(params_c, params_c_abbr):\n for phi, phi_abbr in zip(params_phi, params_phi_abbr):\n tag = f\"{bc_abbr}_c{c_abbr}_A{a_abbr}_phi{phi_abbr}\"\n print(f\"Training PINN: {i}/{all}\")\n os.mkdir(f\"results/{tag}\")\n params = Params(boundary_condition=bc, a=a, c=c, phi=phi)\n domain = Domain.from_params(params)\n pinn, loss, result = train_pinn_from_params(params, tag, device, print_each=None)\n exact = Exact.from_params(params)\n # result = Result(tag, 1,2,3,4,5,6)\n paramss.append(params)\n results.append(result)\n save_result(params, result)\n save_pinn(pinn, tag)\n save_loss(loss, tag)\n save_solution_plot(pinn, exact, tag, domain)\n save_initial_plot(pinn, exact, tag, domain.x)\n save_loss_plot(loss, tag)\n save_anim(pinn, tag, domain)\n torch.cuda.empty_cache()\n i = i + 1\n\n# save_results(paramss, results, \"checked_problems\")\n","repo_name":"pmaczuga/pinn-comparison","sub_path":"check_problems.py","file_name":"check_problems.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5429264241","text":"from linked_list.ListNode import ListNode\nfrom linked_list.UnorderedList import UnorderedList\n\n\ndef sum_list_stored_in_reverse_order(head1, head2):\n \"\"\"\n You have two numbers represented by a linked list, where each node contains a single\n digit. The digits are stored in reverse order, such that the last digit is at the head of the list. Write a\n function that adds the two numbers and returns the sum as a linked list.\n EXAMPLE\n Input: ( 7 - > 1 -> 6) + (5 -> 9 -> 2).That is,617 + 295.\n Output: 2 -> 1 -> 9. 
That is, 912.\n\n    :param head1:\n    :param head2:\n    :return: result\n    \"\"\"\n    result = UnorderedList()\n\n    start1 = head1\n    start2 = head2\n\n    carry = 0\n    result_head = None\n    last = None\n    while start1 is not None or start2 is not None:\n        data1 = data2 = 0\n        if start1 is not None:\n            data1 = start1.data\n\n        if start2 is not None:\n            data2 = start2.data\n\n        tmp = data1 + data2 + carry\n        if tmp >= 10:\n            tmp = tmp - 10\n            carry = 1\n\n        else:\n            carry = 0\n\n        node = ListNode(tmp)\n        if result_head is None:\n            # first digit becomes the head of the result list\n            result_head = node\n        else:\n            last.next_node = node\n        last = node\n\n        if start1 is not None:\n            start1 = start1.next_node\n\n        if start2 is not None:\n            start2 = start2.next_node\n\n    if carry == 1:\n        # create a new node for carry 1\n        node = ListNode(1)\n        # insert this node after last node\n        last.next_node = node\n\n    result.head = result_head\n\n    return result\n\n\nlist1 = UnorderedList(7, 3)\nlist2 = UnorderedList(8, 9, 5, 2)\nresult = sum_list_stored_in_reverse_order(list1.head, list2.head)\nresult.print()\nprint(\"Sum {}\".format(37 + 2598))\n","repo_name":"akash240577/algorithms","sub_path":"linked_list/sum_lists.py","file_name":"sum_lists.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"42405662607","text":"'''\n    https://leetcode.com/problems/subsets/\n\n    Given an integer array nums of unique elements, return all possible subsets (the power set).\n\n    The solution set must not contain duplicate subsets. Return the solution in any order.\n'''\n\n'''\n    Accepted\n'''\n\n\nclass Solution:\n    # at each step we need to consider the case where we take the current element or we don't take it\n    def subsets_helper(self, nums, i, current_subset, solution):\n        if i >= len(nums):\n            # we already explored all our elements\n            solution.append(current_subset)\n        else:\n            # we either take the element at i or we don't\n            current_subset_without = current_subset.copy()\n            current_subset_with = current_subset.copy()\n            current_subset_with.append(nums[i])\n\n            self.subsets_helper(nums, i + 1, current_subset_without, solution)\n            self.subsets_helper(nums, i + 1, current_subset_with, solution)\n\n    def subsets(self, nums: [int]) -> [[int]]:\n        solution = []\n\n        self.subsets_helper(nums, 0, [], solution)\n\n        return solution\n\n\nprint(Solution().subsets([1, 2, 3]))\n","repo_name":"hnc01/online-judge","sub_path":"LeetCode/medium/subsets.py","file_name":"subsets.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10775418655","text":"import bisect\n\n\ndef solution(stones, k):\n    box = list(sorted(stones[:k]))\n    answer = box[-1]\n\n    for i in range(len(stones) - k):\n        box.pop(bisect.bisect_left(box, stones[i]))\n        bisect.insort_left(box, stones[i + k])\n        if answer > box[-1]:\n            answer = box[-1]\n    return answer\n\n\n_stone = [2, 4, 5, 3, 2, 1, 4, 2, 5, 1]\n_k = 3\n\nprint(solution(_stone, _k))\n","repo_name":"wnstjr9711/Study","sub_path":"프로그래머스/kakao/징검다리 건너기.py","file_name":"징검다리 건너기.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7236594534","text":"class Measurement:\n    def __init__(\n        self,\n        value=None, # K0001\n        attribute=None, # K0002\n        date_time=None, # K0004\n        event=None, # K0005\n        batch_no=None, # K0006\n        nest_no=None, # K0007\n        controller_no=None, # K0008\n        machine_no=None, # K0010\n        process_parameter=None, # 
K0011\n        control_no=None,\n        subgroup_size=None,\n        error_count=None,\n    ):\n        self.value = value\n        self.attribute = attribute\n        self.datetime = date_time\n        self.event = event\n        self.batch_no = batch_no\n        self.nest_no = nest_no\n        self.controller_no = controller_no\n        self.machine_no = machine_no\n        self.process_parameter = process_parameter\n        self.control_no = control_no\n        self.subgroup_size = subgroup_size\n        self.error_count = error_count\n\n    def as_value_dictionary(self):\n        \"Returns the measurement as dictionary. The entry is limited to the datetime and the value.\"\n        return {\"datetime\": self.datetime, \"value\": self.value}\n\n    def as_dictionary(self):\n        \"Returns the measurement as dictionary.\"\n        return {\n            \"datetime\": self.datetime,\n            \"value\": self.value,\n            \"attribute\": self.attribute,\n            \"event\": self.event,\n            \"batch_no\": self.batch_no,\n            \"nest_no\": self.nest_no,\n            \"controller_no\": self.controller_no,\n            \"machine_no\": self.machine_no,\n            \"process_parameter\": self.process_parameter,\n            \"control_no\": self.control_no,\n            \"subgroup_size\": self.subgroup_size,\n            \"error_count\": self.error_count,\n        }\n","repo_name":"successfactory/aqdef-reader","sub_path":"aqdefreader/measurement.py","file_name":"measurement.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
{"seq_id":"36026357193","text":"import openai\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# Ensure you have your OPENAI_API_KEY saved in your environment variables\napi_key = os.getenv('OPENAI_API_KEY')\n\nif api_key is None:\n    print(\"Please set your OPENAI_API_KEY in your environment variables.\")\n    exit()\n\nopenai.api_key = api_key\n\nprint(dir(openai))\n\ndef query_gpt(prompt, model='gpt-4', max_tokens=150):\n    \"\"\"\n    Function to query the OpenAI GPT-4 API (or whichever is the latest)\n\n    :param prompt: str, a prompt for the AI.\n    :param model: str, the chat model's identifier (e.g. 'gpt-4', or replace with the latest model ID).\n    :param max_tokens: int, the maximum length of the sequence to be generated.\n    :return: str, the AI's response\n    \"\"\"\n\n    try:\n        response = openai.chat.completions.create(\n            model=model,\n            max_tokens=max_tokens,\n            messages=[\n                {\"role\": \"system\", \"content\": \"You are a poetic assistant, skilled in explaining complex programming concepts with creative flair.\"},\n                {\"role\": \"user\", \"content\": prompt}\n            ]\n        )\n\n        # Extract the text from the response object\n        text = response.choices[0].message.content\n        return text\n    except Exception as e:\n        print(f\"Received an error from the OpenAI API: {e}\")\n        return None\n\n# Test the function\nprompt = \"Translate the following English text to French: 'Hello, how are you?'\"\nresponse = query_gpt(prompt)\nif response:\n    print(response)","repo_name":"hackclub/blot","sub_path":"critic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"81"}
{"seq_id":"7151649632","text":"__author__ = \"Andrew Beck\"\n__copyright__ = \"Copyright (C) 2019 Andrew Beck\"\n__license__ = \"GNU General Public License v3\"\n__version__ = \"0.1\"\n\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later 
version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\nimport multiprocessing\nimport queue\nimport time\nimport os\nimport logging\nimport logging.handlers\n\n# As a multiprocess Python script is hard to debug, but you can use pudb.remote\n# You will need to import the pudb.remote module and set a breakpoint in code with\n#from pudb.remote import set_trace\n#set_trace(term_size=(80, 24))\n# Then telnet to the port shown on screen\n\nclass Cleanup(multiprocessing.Process):\n \"\"\" A class to handle deleting old video and images\n \"\"\"\n def __init__(self, *, config, log_queue):\n \"\"\"\n Initialise the cleanup class\n\n Keyword arguments:\n config -- A ConfigParser object\n\n log_queue -- A queue object to send log messages to\n \"\"\"\n super(Cleanup, self).__init__()\n self.config = config\n self.log_queue = log_queue\n\n self.stoprequest = multiprocessing.Event()\n\n # Setup logging\n h = logging.handlers.QueueHandler(self.log_queue) # Just the one handler needed\n self.queue_logger = logging.getLogger(name='Cleanup')\n self.queue_logger.addHandler(h)\n # apply this unit's logging level\n log_level = {'CRITICAL': logging.CRITICAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG}\n self.queue_logger.setLevel(log_level[self.config['CLEANUP']['log_level']])\n \n def run(self):\n self.queue_logger.info(\"Cleanup started\")\n print(\"Cleanup started\\n\")\n\n # Loop while not asked to exit\n while not self.stoprequest.is_set():\n notification_out_path = self.config['RECORD']['notification_out_path']\n video_out_path = self.config['RECORD']['video_out_path']\n # Calculate time limit for files\n time_limit = time.time() - (int(self.config['CLEANUP']['days_to_keep'])*60*60*24)\n self.queue_logger.debug(f\"time_limit={time_limit}\")\n # Check notification_out_path for images that are too old\n oldfiles = [f for f in os.listdir(notification_out_path) if os.path.isfile(os.path.join(notification_out_path, f)) and os.path.splitext(f)[1] == \".jpg\" and os.path.getmtime(os.path.join(notification_out_path, f)) < time_limit]\n self.queue_logger.debug(f\"oldfiles={oldfiles}\")\n # Delete old files\n for f in oldfiles:\n full_f = os.path.join(notification_out_path, f)\n self.queue_logger.info(f\"Deleting file={full_f}\")\n try:\n os.remove(full_f)\n except OSError:\n self.queue_logger.warning(f\"Cannot Delete={full_f}\")\n\n # Check video_out_path for video that are too old\n for root, dirs, files in os.walk(video_out_path, topdown=False):\n # Process the files in the directory\n for f in files:\n if os.path.splitext(f)[1] == \".h264\" or os.path.splitext(f)[1] == \".mp4\":\n # check the age\n full_f = os.path.join(root, f)\n try:\n file_time = os.path.getmtime(full_f)\n except OSError:\n self.queue_logger.warning(f\"Cannot get modification time of File={full_f}\")\n continue\n\n if file_time < time_limit:\n self.queue_logger.info(f\"Deleting file={full_f}\")\n try:\n os.remove(full_f)\n except OSError:\n self.queue_logger.warning(f\"Cannot Delete File={full_f}\")\n\n # Process the directories to see if any are empty\n for d in dirs:\n full_d = os.path.join(root, d)\n if len(os.listdir(full_d)) == 0:\n # 
Directory is empty try to delete it\n self.queue_logger.info(f\"Deleting directory={full_d}\")\n try:\n os.rmdir(full_d)\n except OSError:\n self.queue_logger.warning(f\"Cannot Delete Directory={full_d}\")\n\n\n # Go to sleep for a bit so not a tight loop\n # As the record might take up to 1 minute to finish looping every 30 seconds should not delay exit much.\n time.sleep(30)\n\n\n\n def join(self, timeout=None):\n self.queue_logger.info(\"Cleanup asked to exit\")\n self.stoprequest.set()\n super(Cleanup, self).join(timeout)","repo_name":"Zardozz/pir-security","sub_path":"cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12101198432","text":"from tensorflow import keras\n\ndef label_smoothing_loss(y_true, y_pred, smoothing=0.1):\n \"\"\"\n Custom loss function implementing label smoothing.\n \"\"\"\n num_classes = y_true.shape[-1]\n smooth_positives = 1.0 - smoothing\n smooth_negatives = smoothing / num_classes\n y_true = y_true * smooth_positives + smooth_negatives\n\n return keras.losses.categorical_crossentropy(y_true, y_pred)\n","repo_name":"nirlotan/SocialAI","sub_path":"Classification/models/custom_loss.py","file_name":"custom_loss.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34286849690","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Import common code\nfrom sysadmws_common import *\nimport gitlab\nimport glob\nimport textwrap\nimport subprocess\nimport paramiko\nimport sys\nimport json\nfrom io import BytesIO\nimport threading\nimport re\nimport time\nfrom datetime import datetime\n\n# Constants and envs\n\nLOGO=\"Services\"\nWORK_DIR = os.environ.get(\"ACC_WORKDIR\", \"/opt/sysadmws/accounting\")\nLOG_DIR = os.environ.get(\"ACC_LOGDIR\", \"/opt/sysadmws/accounting/log\")\nLOG_FILE = \"services.log\"\nCLIENTS_SUBDIR = \"clients\"\nTARIFFS_SUBDIR = \"tariffs\"\nYAML_GLOB = \"*.yaml\"\nYAML_EXT = \"yaml\"\nACC_YAML = \"accounting.yaml\"\n\n# Main\n\nif __name__ == \"__main__\":\n\n # Set parser and parse args\n parser = argparse.ArgumentParser(description='{LOGO} functions.'.format(LOGO=LOGO))\n parser.add_argument(\"--debug\",\n dest=\"debug\",\n help=\"enable debug\",\n action=\"store_true\")\n\n parser.add_argument(\"--ignore-jobs-disabled\",\n dest=\"ignore_jobs_disabled\",\n help=\"ignore jobs_disabled if set in yaml\",\n action=\"store_true\")\n parser.add_argument(\"--at-date\", dest=\"at_date\", help=\"use DATETIME instead of now for tariff\", nargs=1, metavar=(\"DATETIME\"))\n parser.add_argument(\"--salt-ssh\",\n dest=\"salt_ssh\",\n help=\"use salt-ssh in salt, applicable to projects with minions\",\n action=\"store_true\")\n\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\"--exclude-clients\",\n dest=\"exclude_clients\",\n help=\"exclude clients defined by JSON_LIST from all-clients operations\",\n nargs=1, metavar=(\"JSON_LIST\"))\n\n group.add_argument(\"--include-clients\",\n dest=\"include_clients\",\n help=\"include only clients defined by JSON_LIST for all-clients operations\",\n nargs=1, metavar=(\"JSON_LIST\"))\n\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"--pipeline-salt-cmd-for-asset-for-client\",\n dest=\"pipeline_salt_cmd_for_asset_for_client\",\n help=\"pipeline salt CMD for one ASSET for client CLIENT\",\n nargs=3, 
metavar=(\"CLIENT\", \"ASSET\", \"CMD\"))\n\n group.add_argument(\"--pipeline-salt-cmd-for-all-assets-for-client\",\n dest=\"pipeline_salt_cmd_for_all_assets_for_client\",\n help=\"pipeline salt CMD for all assets for client CLIENT\",\n nargs=2, metavar=(\"CLIENT\", \"CMD\"))\n\n group.add_argument(\"--pipeline-salt-cmd-for-all-assets-for-all-clients\",\n dest=\"pipeline_salt_cmd_for_all_assets_for_all_clients\",\n help=\"pipeline salt CMD for all assets for all clients excluding --exclude-clients or only for --include-clients\",\n nargs=1, metavar=(\"CMD\"))\n\n if len(sys.argv) > 1:\n args = parser.parse_args()\n else:\n parser.print_help()\n sys.exit(1)\n\n # Set logger and console debug\n if args.debug:\n logger = set_logger(logging.DEBUG, LOG_DIR, LOG_FILE)\n else:\n logger = set_logger(logging.ERROR, LOG_DIR, LOG_FILE)\n\n # Catch exception to logger\n\n try:\n\n logger.info(\"Starting {LOGO}\".format(LOGO=LOGO))\n\n # Chdir to work dir\n os.chdir(WORK_DIR)\n\n # Read ACC_YAML\n acc_yaml_dict = load_yaml(\"{0}/{1}\".format(WORK_DIR, ACC_YAML), logger)\n if acc_yaml_dict is None:\n raise Exception(\"Config file error or missing: {0}/{1}\".format(WORK_DIR, ACC_YAML))\n \n # Do tasks\n\n if args.exclude_clients is not None:\n json_str, = args.exclude_clients\n exclude_clients_list = json.loads(json_str)\n else:\n exclude_clients_list = []\n\n if args.include_clients is not None:\n json_str, = args.include_clients\n include_clients_list = json.loads(json_str)\n else:\n include_clients_list = []\n\n if args.pipeline_salt_cmd_for_asset_for_client or args.pipeline_salt_cmd_for_all_assets_for_client or args.pipeline_salt_cmd_for_all_assets_for_all_clients:\n \n # For *.yaml in client dir\n for client_file in glob.glob(\"{0}/{1}\".format(CLIENTS_SUBDIR, YAML_GLOB)):\n\n logger.info(\"Found client file: {0}\".format(client_file))\n\n # Load client YAML\n client_dict = load_client_yaml(WORK_DIR, client_file, CLIENTS_SUBDIR, YAML_GLOB, logger)\n if client_dict is None:\n raise Exception(\"Config file error or missing: {0}/{1}\".format(WORK_DIR, client_file))\n \n # Unpack oarams and select client if needed\n needed_asset = None\n if args.pipeline_salt_cmd_for_asset_for_client:\n client, needed_asset, cmd = args.pipeline_salt_cmd_for_asset_for_client\n if client_dict[\"name\"].lower() != client:\n continue\n if args.pipeline_salt_cmd_for_all_assets_for_client:\n client, cmd = args.pipeline_salt_cmd_for_all_assets_for_client\n if client_dict[\"name\"].lower() != client:\n continue\n if args.pipeline_salt_cmd_for_all_assets_for_all_clients:\n cmd, = args.pipeline_salt_cmd_for_all_assets_for_all_clients\n\n # Check client active, inclusions, exclusions and other reqs\n if (\n client_dict[\"active\"] and \"salt_project\" in client_dict[\"gitlab\"] and client_dict[\"configuration_management\"][\"type\"] in [\"salt\", \"salt-ssh\"]\n and\n (\n (\n args.exclude_clients is not None\n and\n client_dict[\"name\"].lower() not in exclude_clients_list\n )\n or\n (\n args.include_clients is not None\n and\n client_dict[\"name\"].lower() in include_clients_list\n )\n or\n (\n args.exclude_clients is None\n and\n args.include_clients is None\n )\n )\n ):\n\n # Skip clients with global jobs disabled\n if not args.ignore_jobs_disabled and \"jobs_disabled\" in client_dict and client_dict[\"jobs_disabled\"]:\n continue\n \n asset_list = get_asset_list(client_dict, WORK_DIR, TARIFFS_SUBDIR, logger, datetime.strptime(args.at_date[0], \"%Y-%m-%d\") if args.at_date is not None else datetime.now())\n\n # Threaded 
function\n def pipeline_salt_cmd(salt_project, asset, cmd):\n\n if args.salt_ssh:\n salt_ssh_in_salt_part = \"SALT_SSH_IN_SALT=true\"\n else:\n salt_ssh_in_salt_part = \"\"\n\n script = textwrap.dedent(\n \"\"\"\n .gitlab-server-job/pipeline_salt_cmd.sh wait {salt_project} 300 {asset} \"{cmd}\" {salt_ssh_in_salt_part}\n \"\"\"\n ).format(\n salt_project=salt_project,\n asset=asset,\n cmd=cmd,\n salt_ssh_in_salt_part=salt_ssh_in_salt_part\n )\n logger.info(\"Running bash script in thread:\")\n logger.info(script)\n run_result = subprocess.run(script, shell=True, universal_newlines=True, executable=\"/bin/bash\", stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n json_result = json.loads(run_result.stdout.rstrip())\n # Take last line as error\n result_error=run_result.stderr.rstrip().split(\"\\n\")[-1]\n result_pipeline_status=json_result.get(\"pipeline_status\", \"\")\n result_project=json_result.get(\"project\", \"\")\n result_target=json_result.get(\"target\", \"\")\n result_url=json_result.get(\"pipeline_url\", \"\")\n print(\"{status}\\t{project}\\t{target}\\t{url}\\t{error}\".format(\n status=result_pipeline_status,\n project=result_project,\n target=result_target,\n url=result_url,\n error=result_error if result_pipeline_status != \"success\" else \"\"\n ))\n\n # For each asset\n for asset in asset_list:\n \n # Pipelines are only for servers\n if asset[\"kind\"] == \"server\":\n\n # Skip assets with disabled jobs\n if \"jobs_disabled\" in asset and asset[\"jobs_disabled\"]:\n continue\n\n # Skip oher assets if specific asset is set\n if needed_asset is not None:\n if needed_asset != asset[\"fqdn\"]:\n continue\n\n # Run pipeline\n thread = threading.Thread(target=pipeline_salt_cmd, args=[client_dict[\"gitlab\"][\"salt_project\"][\"path\"], asset[\"fqdn\"], cmd])\n thread.start()\n # Give gitlab time to create tag and pipeline, otherwise it will be overloaded\n time.sleep(4)\n\n # Reroute catched exception to log\n except Exception as e:\n logger.exception(e)\n logger.info(\"Finished {LOGO} with errors\".format(LOGO=LOGO))\n sys.exit(1)\n\n logger.info(\"Finished {LOGO}\".format(LOGO=LOGO))\n","repo_name":"microdevops-com/accounting","sub_path":"services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":10522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35578092903","text":"# audio signal analyzers\n\nfrom scipy.signal import argrelextrema\nfrom scipy.stats import zscore\nfrom sklearn.mixture import GaussianMixture\nimport librosa, copy\nimport numpy as np\nfrom scipy.ndimage import convolve1d\nimport matplotlib.pyplot as plt\n\n\ndef find_bandwidth(bin_ind, spectral, bin_freqs, cut_off=0.5):\n \"\"\"\n find the bandwidth given STFT bin number, spectral, and frequency of each bin\n \"\"\"\n curr_magnitude = spectral[bin_ind]\n curr_threshold = cut_off * curr_magnitude # get cut-off threshold\n curr_cands = np.where(spectral<=curr_threshold)[0] # find all candidates which are lower than threshold\n curr_diff = curr_cands - bin_ind # get distance between curr bin and all candidates\n # find where sign change in the diff list (indicating the lower and upper bound indexes for bandwidth)\n curr_sign_change = curr_diff[:-1] * curr_diff[1:] # every point multiply its following point\n curr_sign_change_zero = np.where(curr_sign_change==0)[0] # there should be no zero distance\n assert len(curr_sign_change_zero)==0, \"zero occurs in sign change detection for BW determination\"\n curr_sign_change = 
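The pipeline launcher above starts one thread per asset and sleeps four seconds between launches so GitLab has time to create each tag and pipeline. The throttling pattern in isolation (launch_throttled and the job payloads are illustrative names, not from the script):

import threading
import time

def launch_throttled(jobs, worker, delay_s=4.0):
    # Start one thread per job, pausing between launches so the
    # downstream service is not hit with simultaneous requests.
    threads = []
    for job in jobs:
        t = threading.Thread(target=worker, args=(job,))
        t.start()
        threads.append(t)
        time.sleep(delay_s)
    for t in threads:
        t.join()  # wait for every pipeline to report back

launch_throttled(["host-a", "host-b"], lambda h: print("pipeline for", h), delay_s=0.1)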
np.where(curr_sign_change<0)[0] # find negative multiplication meaning sign changed\n curr_upper_index = len(curr_diff)-1\n curr_lower_index = 0\n curr_bw = -1\n if len(curr_sign_change)==0: # if there no negative multiplication, we hit the either extreme candidate\n if curr_diff[0]>0: curr_upper_index = 0 # if first candidate is positive, our peak index is too low, we set the first candidate as upper bound\n elif curr_diff[-1]<0: curr_lower_index = len(curr_diff)-1 # if last candidate is negative, our peak index is too high, we set the last candidate as lower bound\n else:\n assert False, \"wrong sign change detection\"\n if curr_upper_index != len(curr_diff)-1: # if upper index is set, we cannot have lower bound, so use half bandwidth to get full bandwidth\n curr_upper_bound = curr_cands[curr_upper_index]\n curr_bw = 2*(abs(bin_freqs[curr_upper_bound]-bin_freqs[bin_ind]))\n if curr_lower_index != 0: # if lower index is set, we cannot have upper bound\n curr_lower_bound = curr_cands[curr_lower_index]\n curr_bw = 2*(abs(bin_freqs[curr_lower_bound]-bin_freqs[bin_ind]))\n if curr_bw ==-1: # if full bandwidth is not set yet, we have normal upper and lower bounds\n curr_sign_change = curr_sign_change[0]\n curr_lower_bound = curr_cands[curr_sign_change]\n curr_upper_bound = curr_cands[curr_sign_change+1]\n curr_bw = abs(bin_freqs[curr_upper_bound]-bin_freqs[curr_lower_bound])\n return curr_bw\n\n\ndef remove_harmonics(peak_mask, spec, bin_freqs, mode='exhaust'):\n \"\"\"\n remove harmonic peaks from the given peak masks and STFT\n \"\"\"\n # frequency to bin number\n def freq2bin(freq, bin_freqs):\n diffs = np.abs(bin_freqs - freq)\n ind = np.argmin(diffs)\n return ind\n\n ori_mask = copy.deepcopy(peak_mask) # store the original mask\n spec_masked = spec * peak_mask\n peak_dim, peak_time = peak_mask.shape\n for i in range(peak_time):\n if np.sum(peak_mask[:, i])==0: continue # no peak at current time\n peak_search_range = peak_dim // 2 + 1\n search_peaks = spec_masked[:peak_search_range, i]\n search_index = np.linspace(0, peak_search_range-1, peak_search_range, dtype=int)\n search_peaks = np.stack((search_index, search_peaks), axis=1)\n search_peaks_queue = search_peaks[:, 1].argsort() # sort ascend based on peak value\n search_peaks_queue = search_peaks_queue[::-1] # reassign descending\n if mode==\"exhaust\":\n srh_len = len(search_peaks_queue) # if exhaust, remove all harmonics\n else:\n srh_len = 1 # if not, only remove harmonics of the strongest peak\n # iterate from largets to smallest peaks\n main_peaks = []\n for si in range(0, srh_len):\n if search_peaks[search_peaks_queue[si], 1]==0: continue # if no peak, skip\n if si in main_peaks: continue # skip if this is a main peak\n main_peaks.append(si) # we skip peak that have been processed avoiding remove strong peaks\n curr_peak_ind = int(search_peaks[search_peaks_queue[si],0]) # index of current peak\n curr_peak_freq = bin_freqs[curr_peak_ind]\n curr_peak_bw = find_bandwidth(curr_peak_ind, spec[:, i], bin_freqs) # get band width\n harm_factor = 2\n # loop over all harmonics until the largest freqeuncy\n while curr_peak_freq*harm_factor < bin_freqs[-1]:\n curr_harm_freq = curr_peak_freq * harm_factor\n curr_harm_band = curr_peak_bw * harm_factor # current harmonic's bandwidth\n curr_harm_start_freq = curr_harm_freq - curr_harm_band/2\n curr_harm_end_freq = curr_harm_freq + curr_harm_band/2\n curr_harm_start_ind = freq2bin(curr_harm_start_freq, bin_freqs)\n curr_harm_end_ind = freq2bin(curr_harm_end_freq, bin_freqs)\n 
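find_bandwidth locates the cut-off crossings around a peak by multiplying adjacent entries of a difference array: a negative product means the sign flipped between two candidate bins. A worked toy case:

import numpy as np

# Candidate bin indices minus the peak index: negatives lie below the
# peak, positives above.  A sign flip between neighbours marks the pair
# of candidates that bracket the peak.
diff = np.array([-7, -3, -1, 2, 5])
products = diff[:-1] * diff[1:]   # [21, 3, -2, 10]
flip = np.where(products < 0)[0]  # [2]: the flip sits between -1 and 2
print(diff[flip[0]], diff[flip[0] + 1])  # -1 2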
peak_mask[curr_harm_start_ind:curr_harm_end_ind+1, i] = 0 # mask peaks in harmonic band\n harm_factor += 1 # go to next harmonic\n return peak_mask, ori_mask\n\n\ndef extract_peaks(spec, peak_movlen=5, peak_relativeth=4, peak_globalth=50):\n \"\"\"\n extract peaks from spectrogram (linear or mel-scale)\n \"\"\"\n feat_dim, _ = spec.shape\n spec_mask = np.ones_like(spec) # mask of spec\n spec_db = librosa.power_to_db(spec) # convert to db scale\n\n # extract peaks along mels\n # exclude all-same time point\n spec_var = np.var(spec_db, axis=0)\n spec_var_mask = np.ones_like(spec_var)\n spec_var_mask[spec_var==0] = 0 # if variance is zeros, all the elements have the same value (no peak exists)\n spec_var_mask_mat = np.tile(spec_var_mask, (feat_dim, 1))\n spec_mask = spec_mask * spec_var_mask_mat\n\n # move average\n ma_kernel = [1/peak_movlen] * peak_movlen\n spec_ma = convolve1d(spec_db, weights=ma_kernel, axis=0, mode='nearest')\n spec_ma_diff = spec_db - spec_ma # value over local average\n spec_mask[spec_ma_diff<peak_relativeth] = 0 # set mask based on local threshold\n\n # ### debug ###\n # plt.figure()\n # fig, ax = plt.subplots()\n # img = librosa.display.specshow(spec_mask, ax=ax)\n # plt.savefig(\"../plots/move_mask.png\")\n # plt.close()\n # plt.figure()\n # fig, ax = plt.subplots(nrows=2)\n # ax[0].plot(np.linspace(0, len(spec_mask[:,0])-1, len(spec_mask[:,0])), spec[:,9])\n # ax[1].plot(np.linspace(0, len(spec_mask[:,0])-1, len(spec_mask[:,0])), spec_mask[:,9])\n # plt.savefig(\"../plots/spec_peaks_move.png\")\n # plt.close()\n # ######\n\n # global threshold\n spec_max = np.max(spec_db, axis=0) # max at each time\n global_th = spec_max - peak_globalth # threshold has determined difference from max amplitude at each time\n global_th_mat = np.tile(global_th, (feat_dim, 1))\n spec_global_diff = spec_db - global_th_mat # value over threshold\n spec_mask[spec_global_diff<0] = 0 # set mask based on global threshold\n\n # ### debug ###\n # plt.figure()\n # fig, ax = plt.subplots()\n # img = librosa.display.specshow(spec_mask, ax=ax)\n # plt.savefig(\"../plots/global_mask.png\")\n # plt.close()\n # plt.figure()\n # fig, ax = plt.subplots(nrows=2)\n # ax[0].plot(np.linspace(0, len(spec_mask[:,0])-1, len(spec_mask[:,0])), spec[:,9])\n # ax[1].plot(np.linspace(0, len(spec_mask[:,0])-1, len(spec_mask[:,0])), spec_mask[:,9])\n # plt.savefig(\"../plots/spec_peaks_global.png\")\n # plt.close()\n # ######\n\n # local maxima\n spec_local_maxima_ind = argrelextrema(spec_db, np.greater, axis=0)\n spec_local_maxima_ind = np.array(spec_local_maxima_ind).transpose()\n spec_mask_localmax = np.zeros_like(spec_mask)\n spec_mask_localmax[spec_local_maxima_ind[:,0], spec_local_maxima_ind[:,1]] = 1\n\n # combine to all masks\n spec_mask = spec_mask * spec_mask_localmax\n\n # ### debug ###\n # plt.figure()\n # fig, ax = plt.subplots()\n # img = librosa.display.specshow(spec_mask, ax=ax)\n # plt.savefig(\"../plots/maxima_mask.png\")\n # plt.close()\n # plt.figure()\n # fig, ax = plt.subplots(nrows=2)\n # ax[0].plot(np.linspace(0, len(spec_mask[:,0])-1, len(spec_mask[:,0])), spec[:,9])\n # ax[1].plot(np.linspace(0, len(spec_mask[:,0])-1, len(spec_mask[:,0])), spec_mask[:,9])\n # plt.savefig(\"../plots/spec_peaks_maxima.png\")\n # plt.close()\n # ######\n\n return 
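remove_harmonics repeatedly maps a harmonic frequency back to the nearest STFT bin via an argmin over absolute differences. The same helper stands alone like this:

import numpy as np

def freq2bin(freq, bin_freqs):
    # Index of the FFT bin whose centre frequency is closest to freq.
    return int(np.argmin(np.abs(bin_freqs - freq)))

bin_freqs = np.linspace(0, 8000, 9)  # bin centres at 0, 1000, ..., 8000 Hz
print(freq2bin(2300, bin_freqs))     # 2 (2300 Hz is closest to 2000 Hz)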
spec_mask","repo_name":"Ivan-wang/audiovibe","sub_path":"finetune/sigprocs/signal_analysis.py","file_name":"signal_analysis.py","file_ext":"py","file_size_in_byte":8614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25865030056","text":"import sqlite3\n# This is the practice DB py file\n# borrowed from an earlier project, to get it started\n\n#FOR REFERENCE ONLY*******************************************\n# ******maybe put menu in database file for future db aadmin\n\n\nsqlite_file = 'ClesiusCookies_db.sqlite' # name of the db\n\ntable_cookie_type = 'Cookie_Type' # table name\nid_field = 'Cookie_ID' # Primary key\nname_field = 'Cookie_Name' # name column\nprice_field = 'Cookie_Price' # price column\n\ndef main():\n show_menu() # call function to show user options\n\ndef show_menu():\n while True:\n print() # intentional blank line\n print(\"Menu options: \") # show user thier options...\n print(\"1: CREATE a database and table\")\n print(\"2: ADD a row of data to the table\") # \"a\" table? then select the table?\n print(\"3: UPDATE a row of data from the table\") # \"\"\"\n print(\"4: DELETE a row of data from the table\") # \"\"\"\n print(\"5: SHOW the data from entire table\") # \"\"\"\n print(\"6: DISPLAY a single row of data\") # \"\"\"\n # print(\"7: DROP TABLE-->BE CAREFUL\") hidden, for testing purposes only\n print(\"9: QUIT program\")\n print() # intentional blank line\n user_input = input(\"Please enter the number of your selection: \") # gets the user choice\n # call the function, from user's choice\n if user_input == \"1\":\n create_database()\n # elif user_input == \"2\":\n # add_row()\n # elif user_input == \"3\":\n # update_row()\n # elif user_input == \"4\":\n # delete_row()\n # elif user_input == \"5\":\n # show_all_rows()\n # elif user_input == \"6\":\n # show_single_row()\n # elif user_input == \"7\": # for testing\n # drop_table()\n elif user_input == \"9\":\n print()\n print(\"Thank you, goodbye\")\n break # ends the program\n else:\n print() # intentional blank line\n print(\"Please make a valid selection, jackass.\") # prompts user for valid input\n print() # intentional blank line\n # show_menu() nope. 
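extract_peaks combines its criteria by multiplying binary masks, so a bin survives only if it passes every test. A reduced single-frame example (assumes SciPy, which the original already imports):

import numpy as np
from scipy.signal import argrelextrema

spec_db = np.array([0.0, 5.0, 1.0, 7.0, 2.0, 2.0])

# Mask 1: strict local maxima.
maxima = np.zeros_like(spec_db)
maxima[argrelextrema(spec_db, np.greater)[0]] = 1

# Mask 2: within 4 dB of the frame maximum.
global_ok = (spec_db > spec_db.max() - 4).astype(float)

# A peak must satisfy every criterion, hence the product of masks.
print(maxima * global_ok)  # [0. 1. 0. 1. 0. 0.]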
It loops back up to the top, did a while loop instead.\n\ndef create_database():\n print(\"--->creating the database<---\") # for testing\n # connecting to the database file\n conn = sqlite3.connect(sqlite_file)\n c = conn.cursor()\n\n # creating table with columns, put in a variable to make debugging easier\n create_table_sql = (\n 'CREATE TABLE If NOT EXISTS {tn} ('\n ' {nf} {ft} PRIMARY KEY AUTOINCREMENT ,'\n ' {nf2} {ft_T} NOT NULL , '\n ' {nf3} {ft_T} NOT NULL ,'\n ') '\n ).format(tn=table_cookie_type, nf=id_field, nf2=name_field, nf3=price_field, ft_T=field_type_txt)\n\n c.execute(create_table_sql) # pass the variable instead of putting the code into execute statement\n\n conn.commit()\n conn.close()\n\nmain()\n","repo_name":"SonjaH2001/Midterm","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11569347861","text":"import decimal\nfrom django.http import HttpRequest\nfrom shop.models import Product, Addon, DOESNT_EXIST_ID\nfrom services.deal_service import get_discounted_price\n\n\nclass Cart:\n def __init__(self, request: HttpRequest):\n self.request = request\n if not self.__assert_cart_exists():\n self.__insert_cart_in_session()\n self.session_cart: dict = self.request.session[\"cart\"]\n\n def add(self, product_id, count: int, addon_id: int = DOESNT_EXIST_ID):\n self.session_cart[product_id] = {\"self\": product_id, \"count\": count, \"addon_id\": addon_id}\n self.request.session.modified = True\n\n def set(self, product_id, count: int = None, addon_id=None):\n if count is not None:\n self.session_cart[product_id][\"count\"] = count\n if addon_id is not None:\n self.session_cart[product_id][\"addon_id\"]\n\n def update_multiple_count(self, id_counts: [str, int]):\n for entry in id_counts:\n self.session_cart[entry[0]][\"count\"] = entry[1]\n self.request.session.modified = True\n\n def get_data(self) -> dict:\n items = {}\n items_count = 0\n sub_total_price = 0\n for product_id in self.session_cart.values():\n product_items_price = 0\n product = Product.objects.get(id=product_id[\"self\"])\n if product_id[\"addon_id\"] != DOESNT_EXIST_ID:\n addon = Addon.objects.get(id=product_id[\"addon_id\"])\n product_items_price += decimal.Decimal(addon.price)\n else:\n addon = Addon(id=DOESNT_EXIST_ID, product=product)\n count = product_id[\"count\"]\n items_count += int(count)\n product_discounted_price = get_discounted_price(product)\n product_items_price += product_discounted_price * int(count)\n sub_total_price += product_items_price\n items[product_id[\"self\"]] = {\n \"self\": product,\n \"count\": count,\n \"addon\": addon,\n \"product_discounted_price\": product_discounted_price\n }\n return {\"items\": items, \"sub_total_price\": sub_total_price, \"items_count\": items_count}\n\n def has_any(self):\n return bool(len(self.session_cart))\n\n def remove_item(self, product_id):\n self.session_cart.pop(product_id)\n self.request.session.modified = True\n\n def remove_addon(self, product_id):\n self.session_cart[product_id][\"addon_id\"] = \"-1\"\n\n def clear(self):\n self.__insert_cart_in_session()\n\n def __assert_cart_exists(self):\n cart = self.request.session.get(\"cart\", None)\n if cart is None:\n return False\n return True\n\n def item_exists(self, product_id):\n return bool(self.session_cart.get(product_id, False))\n\n def __insert_cart_in_session(self):\n self.request.session[\"cart\"] = 
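As written, create_database() cannot run: field_type_txt is never defined, the {ft} placeholder is never bound, and the trailing comma before the closing parenthesis is invalid SQLite. A corrected sketch of the intended statement (an in-memory database keeps the example self-contained):

import sqlite3

conn = sqlite3.connect(":memory:")
c = conn.cursor()

# No comma after the last column, and every placeholder is bound.
create_table_sql = (
    'CREATE TABLE IF NOT EXISTS {tn} ('
    ' {nf} INTEGER PRIMARY KEY AUTOINCREMENT,'
    ' {nf2} TEXT NOT NULL,'
    ' {nf3} REAL NOT NULL'
    ')'
).format(tn="Cookie_Type", nf="Cookie_ID", nf2="Cookie_Name", nf3="Cookie_Price")

c.execute(create_table_sql)
conn.commit()
conn.close()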
{}\n","repo_name":"HCodeKeeper/CappyBlappyShop","sub_path":"services/cart_service.py","file_name":"cart_service.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21340011587","text":"import argparse\nimport pandas as pd\nfrom utils.munging import SoccerEventExtractor\nfrom utils.sliding_window import SlidingWindow\nfrom utils.extractive_sum import ExtractiveSum\nfrom utils.abstractive_sum import T5Sum\nfrom utils.rouge_score import RougeScore\nfrom utils.collect_data import MulitScraping\nfrom utils.plot_rouge import PlotRouge\n\nif __name__ == \"__main__\":\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\"Code for Football/Soccer Match Summarization\")\n parser.add_argument(\"--scrape\", required=True, help=\"Specifiy if you want to scrape data or use existing data\")\n parser.add_argument(\"--links\", required=True, help=\"Path to links file\")\n parser.add_argument(\"--output_path\", required=True, help=\"Path to output file\")\n parser.add_argument(\"--t5\", required=True, help=\"Specifiy if you want to use T5 model or not\")\n parser.add_argument(\"--eval_plot\", required=True, help=\"Specifiy if you want to plot the Rouge scores or not\")\n args = parser.parse_args()\n \n try:\n args.scrape = int(args.scrape)\n args.t5 = int(args.t5)\n args.eval_plot = int(args.eval_plot)\n \n except:\n print(\"Please specify integer values for scraping, t5 and eval_plot\")\n \n if args.scrape not in [0,1]:\n parser.error(\"Please specify 0 or 1 for scraping data\")\n \n if args.scrape == 1:\n # Scrape data from goal.com\n scraper = MulitScraping(args.links, args.output_path)\n match_df = pd.read_csv(args.output_path)\n \n else:\n # Use existing data\n match_df = pd.read_csv(args.output_path)\n \n main_df = SoccerEventExtractor(match_df)\n clean_df = main_df._generate_match_ids()\n\n # Initialize sliding window object\n slide_window = SlidingWindow(clean_df)\n\n # Create window for football match summaries\n comm_list = slide_window.create_window()\n\n # Get full match summaries\n all_ft_comm = slide_window.get_full_match_summ()\n \n # create RougeScore object\n r_score = RougeScore()\n\n # Extractive summarization\n extract_sum = ExtractiveSum(comm_list)\n\n # Using SpaCy\n spacy_comm = extract_sum.get_spacy_sum()\n spacy_comm_joined = []\n for comms in spacy_comm:\n spacy_comm_joined.append(\" \".join(comms))\n\n # Using NLTK\n nltk_comm = extract_sum.get_nltk_sum()\n \n print('Extractive done')\n\n if (args.t5 not in [0,1]):\n parser.error(\"Please specify 0 or 1 for using T5 model\")\n \n if args.t5 == 1:\n # Abstractive summarization using T5 model\n t5_comm = T5Sum(spacy_comm).get_all_comm_t5()\n r_score_t5 = r_score.get_rouge_score(all_ft_comm, t5_comm, 'T5')\n print('Abstractive done')\n else:\n print('Abstractive not done')\n\n # Calculate Rouge scores\n r_score_spacy = r_score.get_rouge_score(\n all_ft_comm, spacy_comm_joined, 'spacy')\n r_score_nltk = r_score.get_rouge_score(all_ft_comm, nltk_comm, 'nltk')\n \n if (args.eval_plot not in [0,1]):\n parser.error(\"Please specify 0 or 1 for plotting Rouge scores\")\n \n if args.eval_plot == 1:\n plt_rouge = PlotRouge()\n rouge_df = plt_rouge.extract_melt_rouge(r_score_spacy, r_score_nltk)\n \n plt_rouge.plot_uni_rouge_score(rouge_df)\n plt_rouge.plot_lcs_rouge_score(rouge_df)\n \n else:\n print('Plotting not 
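Note that Cart.set() reads self.session_cart[product_id]["addon_id"] without assigning to it, so addon updates are silently dropped. A framework-free sketch of the corrected update (a plain dict stands in for the Django session):

cart = {}  # stands in for request.session["cart"]

def add(product_id, count, addon_id=-1):
    cart[product_id] = {"self": product_id, "count": count, "addon_id": addon_id}

def set_item(product_id, count=None, addon_id=None):
    if count is not None:
        cart[product_id]["count"] = count
    if addon_id is not None:
        cart[product_id]["addon_id"] = addon_id  # assignment the original omits

add("42", 1)
set_item("42", count=3, addon_id=7)
print(cart)  # {'42': {'self': '42', 'count': 3, 'addon_id': 7}}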
done')\n\n","repo_name":"avisionary/commentary_analysis","sub_path":"commentary_analysis/bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25027086988","text":"\ndef is_valid_walk(walk):\n direct = {'n':1,'s':-1,'e':2,'w':-2}\n\n count = 0\n\n if len(walk) != 10:\n return False\n else:\n for i, num in direct.items():\n for j in walk:\n if j == i:\n count += num\n if count == 0:\n return True\n else:\n return False\n\nprint (is_valid_walk(['n', 'e', 'n', 'w', 'n', 'n', 'w', 'w', 'n', 's']))\n","repo_name":"azizsaad/55-CodeWars-Problems","sub_path":"codewars/6 kyu/ten_min_walk.py","file_name":"ten_min_walk.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10229057965","text":"'''\n\tThis file takes an xml file as input and picks up all bug ids that were reported one year from date of release of current version.\n'''\nimport xml.etree.ElementTree as et\nimport datetime as dt\nfrom datetime import timedelta\npath='/home/pranav/Project/Eclipse2.1/PostReleaseFiles/'\ntree =et.parse(path+'show_bug2.1.xml')\nroot=tree.getroot()\ndate_of_rel='2003-03-27 21:30:00'\nformat = '%Y-%m-%d %H:%M:%S'\nenter='\\n'\nspace= ' '\ndate=dt.datetime.strptime(date_of_rel,format)\nf=open(path+'bugz_time.dat','w')\nfor child in root:\n\tfor neighbor in child.iter('creation_ts'):\n\t\tneighbor.text=neighbor.text[:19]\n\t\ttemp=dt.datetime.strptime(neighbor.text,format)\t\t\n\t\tif(temp-date<timedelta(days=365) and temp-date>timedelta(days=0)):\n\t\t\tfor neighbor2 in child.iter('bug_id'):\n\t\t\t\tf.write(neighbor2.text) \n\t\t\t\tf.write(space)\n\t\t\t#f.write(neighbor.text) \n\t\t\tf.write(enter)\n\n\n\t\n","repo_name":"pranavr93/Bug-Detection","sub_path":"python codes/PostReleaseFiles/XMLParser.py","file_name":"XMLParser.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37022017567","text":"from DamageTypes import DamageTypes\nimport configparser\n\nclass Mods:\n def __init__(self, name: str):\n self.Name = name\n self.Multiplier = {}\n self.DamageTypesInstance = DamageTypes()\n for entry in self.DamageTypesInstance.Multiplier:\n self.Multiplier[entry] = 0\n\n for entry in self.DamageTypesInstance.SpecialMods:\n self.Multiplier[entry] = 0\n\n def loadMod(name: str):\n modParser = configparser.ConfigParser()\n modParser.read(\"Mods/\" + name + \".ini\")\n mods = Mods(name)\n DamageTypesInstance = DamageTypes()\n for entry in DamageTypesInstance.Multiplier:\n mods.Multiplier[entry] = float(modParser[\"Mods\"][entry])\n #print(entry+\": \" + str(mods.Multiplier[entry]))\n\n for entry in DamageTypesInstance.SpecialMods:\n if entry in modParser[\"Mods\"]:\n mods.Multiplier[entry] = float(modParser[\"Mods\"][entry])\n\n if DamageTypesInstance.DamagePerStack in modParser[\"Mods\"]:\n mods.Multiplier[DamageTypesInstance.DamagePerStack] = float(modParser[\"Mods\"][DamageTypesInstance.DamagePerStack])\n\n return mods\n","repo_name":"Shortyoo/WarframeDamageCalculator","sub_path":"Mods.py","file_name":"Mods.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8594312771","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 5 16:01:32 2016\n\n@author: alex\n\"\"\"\n\nfrom 
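The ten_min_walk record above scores n/s as ±1 and e/w as ±2 and tests whether the sum is zero, but the two axes can cancel each other: four unmatched north steps (+4) and two unmatched west steps (-4) also sum to zero, so its own sample walk wrongly returns True. Counting opposite pairs per axis avoids the false positive:

def is_valid_walk(walk):
    # Back at the start iff exactly ten steps and each axis cancels.
    return (len(walk) == 10
            and walk.count('n') == walk.count('s')
            and walk.count('e') == walk.count('w'))

# Net 4 blocks north and 2 blocks west: not a valid round trip.
print(is_valid_walk(['n', 'e', 'n', 'w', 'n', 'n', 'w', 'w', 'n', 's']))  # False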
AlexRobotics.dynamic import DynamicSystem as RDDS\nfrom AlexRobotics.control import linear as RCL\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\"\"\" Define system \"\"\"\n\n# Define dynamic system\ndouble_integrator = RDDS.DynamicSystem()\n\n# Define controller\nkp = 1\nkd = 1\nPD_controller = RCL.PD( kp , kd )\n\n# Asign feedback law to the dynamic system\ndouble_integrator.ctl = PD_controller.u\n\n\"\"\" Simulation and plotting \"\"\"\n\n# Phase plot\n#PP = RDDS.PhasePlot( double_integrator )\n#PP.compute()\n#PP.plot()\n\nopen_loop_vector_field = True\nclosed_loop_vector_field = True\n\ndouble_integrator.phase_plane( closed_loop_vector_field , open_loop_vector_field )\n\n# Trajectory simulation and plotting\ndouble_integrator.ubar = np.array([3])\nx0 = np.array([-5,-5])\ntf = 10\n\nopen_loop_trajectory = True\nclosed_loop_trajectory = True\n\ndouble_integrator.phase_plane_trajectory( x0 , tf , True , True , True, True )\n\n# Time plot\ndouble_integrator.Sim.plot_OL()\ndouble_integrator.Sim.plot_CL()\n\n# Hold figures alive\nplt.show()\n","repo_name":"ali493/pyro","sub_path":"old/examples/double_integrator_with_PD.py","file_name":"double_integrator_with_PD.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5534817151","text":"\"\"\"\nOriginal author: chen binbin\nmail: cbb@cbb1996.com\nlink: https://github.com/AsprinChina/L2RPN_NIPS_2020_a_PPO_Solution/blob/master/Teacher/Teacher1.py\n\nModified:\nShourya Bose | Qiuling Yang | Yu Zhang\nmail: shbose@ucsc.edu\n\"\"\"\nimport os\nimport time\nimport random\nimport grid2op\nimport numpy as np\nfrom grid2op import make\nfrom tqdm import tqdm, trange\nimport argparse\nfrom hashlib import sha1\nfrom lightsim2grid import LightSimBackend\n\n## parser\nparser = argparse.ArgumentParser(description='MODDED_TEACHER')\n\nparser.add_argument('--envname',type=str,default=\"l2rpn_wcci_2022\")\nparser.add_argument('--samplestep',type=int,default=72)\nparser.add_argument('--cap_fast_forward_at',type=int,default=1e+4)\nparser.add_argument('--supress_int_logs',type=int,default=1)\nparser.add_argument('--topn',type=int,default=350)\nparser.add_argument('--filename',type=str,default='act.npz')\nparser.add_argument('--save_every',type=int,default=2)\nparser.add_argument('--deg_of_sep',type=int,default=6) \nparser.add_argument('--use_parallel',type=int,default=0)\nparser.add_argument('--data_path',type=str,default=os.getcwd())\nparser.add_argument('--prefix',type=str,default=os.getcwd())\n\nargs = parser.parse_args()\n\n# use parallel?\nif args.use_parallel:\n from mpi4py import MPI\n\n## global vars\nenvname = args.envname\naction_buffer = [] # dict to store action buffer\ncount_buffer = [] # dict to store counts\n\ndef find_neighbors(line_id,env,deg):\n if deg == 0:\n return []\n nb = np.array([env.line_or_to_subid[line_id],env.line_ex_to_subid[line_id]]).tolist()\n line_idx = np.arange(env.n_line)\n for _ in range(deg-1):\n nbprime = nb\n for sub in nbprime:\n # check lines whose or (ex) is sub and record their ex (or)\n exlines = env.line_ex_to_subid[line_idx[env.line_or_to_subid==sub]].tolist()\n if exlines !=[]:\n nb += exlines\n orlines = env.line_or_to_subid[line_idx[env.line_ex_to_subid==sub]].tolist()\n if orlines != []:\n nb += orlines\n nb = np.unique(nb).tolist()\n return nb\n\ndef get_all_unitary_line_topologies_change(action_space,sub_id=None):\n res = []\n if sub_id is not None:\n or_lines = 
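The double-integrator example wires kp = kd = 1 into a PD feedback law, which for this plant is presumably the usual u = -kp*x - kd*x_dot toward the origin. A standalone Euler-integration sketch showing the closed loop settling from the script's initial state (the AlexRobotics classes are not needed to see the behaviour):

import numpy as np

kp, kd = 1.0, 1.0

def pd(x):
    # Proportional on position, derivative on velocity.
    return -kp * x[0] - kd * x[1]

x = np.array([-5.0, -5.0])  # [position, velocity], as in the script
dt = 0.01
for _ in range(2000):       # 20 s of simulation
    x = x + dt * np.array([x[1], pd(x)])
print(np.round(x, 3))       # approximately [0. 0.]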
np.arange(action_space.n_line)[action_space.line_or_to_subid==sub_id]\n ex_lines = np.arange(action_space.n_line)[action_space.line_ex_to_subid==sub_id]\n for line in or_lines:\n status = action_space.get_change_line_status_vect()\n status[line] = True\n res.append(action_space({\"change_line_status\": status}))\n for line in ex_lines:\n status = action_space.get_change_line_status_vect()\n status[line] = True\n res.append(action_space({\"change_line_status\": status}))\n return res\n\ndef get_all_unitary_load_topologies_change(action_space,sub_id=None):\n res = []\n if sub_id is not None:\n load_bus = np.arange(env.n_load)[env.load_to_subid==sub_id]\n for lbus in load_bus:\n res.append(action_space({\"change_bus\":{\"loads_id\":[lbus]}}))\n return res\n\ndef get_all_unitary_gen_topologies_change(action_space,sub_id=None):\n res = []\n if sub_id is not None:\n gen_bus = np.arange(env.n_gen)[env.gen_to_subid==sub_id]\n for gbus in gen_bus:\n res.append(action_space({\"change_bus\":{\"generators_id\":[gbus]}}))\n return res\n\ndef get_all_unitary_storage_topologies_change(action_space,sub_id=None):\n res = []\n if sub_id is not None:\n storage_bus = np.arange(env.n_storage)[env.storage_to_subid==sub_id]\n for sbus in storage_bus:\n res.append(action_space({\"change_bus\":{\"storages_id\":[sbus]}}))\n return res\n\ndef get_unitary_actions(env,line_id,deg):\n all_actions = []\n nb_buses = find_neighbors(line_id,env,deg)\n for buses in nb_buses:\n all_actions += get_all_unitary_line_topologies_change(env.action_space,sub_id=buses)\n all_actions += get_all_unitary_load_topologies_change(env.action_space,sub_id=buses)\n all_actions += get_all_unitary_gen_topologies_change(env.action_space,sub_id=buses)\n all_actions += get_all_unitary_storage_topologies_change(env.action_space,sub_id=buses)\n return all_actions\n\ndef topology_search(env,line_id,ep,max_deg=args.deg_of_sep):\n #print('Starting topology search!',flush=True)\n obs = env.get_obs()\n min_rho, overflow_id = obs.rho.max(), obs.rho.argmax()\n if not args.supress_int_logs:\n print(\"step-%s, line-%s(from bus-%d to bus-%d) overflows, max rho is %.5f\" %\n (dst_step, overflow_id, env.line_or_to_subid[overflow_id],\n env.line_ex_to_subid[overflow_id], obs.rho.max()))\n all_actions = get_unitary_actions(env,line_id,max_deg)\n action_chosen = env.action_space({})\n tick = time.time()\n for action in all_actions:\n if not env._game_rules(action, env):\n continue\n obs_, _, done, _ = obs.simulate(action)\n if (not done) and (obs_.rho.max() < min_rho):\n min_rho = obs_.rho.max()\n action_chosen = action\n if not args.supress_int_logs:\n print(\"On episode %d and line %d,max rho decreases to %.5f, search duration: %.2f.\" %\n (ep,line_id,min_rho, time.time() - tick))\n return action_chosen\n\ndef hash_action(act):\n return sha1(act.to_vect().astype(int).data.tobytes()).hexdigest()\n\ndef save_to_buffer(env,act,line,ep,ch,ff):\n if not act == env.action_space({}):\n hash_act = hash_action(act)\n try:\n count_buffer[line][hash_act] += 1\n seen = True\n except KeyError:\n action_buffer[line][hash_act] = act.to_vect().astype(int)\n count_buffer[line][hash_act] = 1\n seen = False\n print('Episode %d, Line %d, Chronic %d, Fastforward %d: Action with hash %s %s buffer! 
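find_neighbors grows a set of substations degree by degree: start from the two endpoint buses of the disconnected line, then repeatedly absorb every bus reachable over one more line. The same expansion on a hypothetical four-line chain:

# Toy grid: line i connects bus line_or[i] to bus line_ex[i].
line_or = [0, 1, 2, 3]
line_ex = [1, 2, 3, 4]

def neighbors(line_id, deg):
    nb = {line_or[line_id], line_ex[line_id]}
    for _ in range(deg - 1):
        grown = set(nb)
        for i in range(len(line_or)):
            if line_or[i] in nb:
                grown.add(line_ex[i])  # follow the line to its far end
            if line_ex[i] in nb:
                grown.add(line_or[i])
        nb = grown
    return sorted(nb)

print(neighbors(1, 1))  # [1, 2]
print(neighbors(1, 2))  # [0, 1, 2, 3]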
Action was %s.'\n %(ep+1,line+1,ch+1,ff,str(hash_act),'saved to' if not seen else 'updated in',\n 'seen' if seen else 'not seen'),flush=True)\n \ndef save_buffer_to_disk(env,line,ep,prefix,top):\n if not os.path.isdir(prefix+'/save_acts'):\n os.makedirs(prefix+'/save_acts')\n if action_buffer[line].keys() != []:\n sorted_keys = [k for k,_ in sorted(count_buffer[line].items(),key=lambda x:x[1],reverse=True)]\n if len(sorted_keys) >= top:\n top_acts = np.array([action_buffer[line][key] for key in sorted_keys[:top]])\n top_counts = np.array([count_buffer[line][key] for key in sorted_keys[:top]])\n else:\n top_acts = np.array([action_buffer[line][key] for key in sorted_keys])\n top_counts = np.array([count_buffer[line][key] for key in sorted_keys])\n np.savez(prefix+'/save_acts/line_%d_%s'%(line,args.filename),action_space=top_acts,counts=top_counts,episode=ep)\n print('\\n\\nSaved to disk!\\n\\n',flush=True)\n else:\n print('XXXXX-----XXXXX---WARNING: Found no legal options for line %d on episode %d in buffer.'%(line,ep))\n print('Saving empty action!')\n top_acts = np.array([env.action({}).to_vect().astype(int)])\n top_counts = np.array([0])\n np.savez(prefix+'/save_acts/ep%d/line_%d_%s'%(ep,line,args.filename),action_space=top_acts,counts=top_counts)\n \ndef split_given_size(a, size):\n return np.split(a, np.arange(size,len(a),size))\n\nif __name__ == \"__main__\":\n \n # comm for parallel ops\n if args.use_parallel:\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n else:\n rank = 0\n size = 1\n \n # check workers if parallel\n if args.use_parallel:\n print('Rank %d of total %d.'%(rank,size),flush=True)\n \n # init the environment and ensure path of downloaded chronics\n if grid2op.MakeEnv.get_current_local_dir() != args.data_path:\n grid2op.MakeEnv.change_local_dir(args.data_path)\n env = grid2op.make(envname, backend=LightSimBackend())\n \n # attack all lines\n LINES2ATTACK = np.arange(env.n_line)\n \n # make space for all lines in dict\n action_buffer = [{} for _ in range(env.n_line)]\n count_buffer = [{} for _ in range(env.n_line)]\n \n # number of episodes\n n_episodes = int(np.ceil(int(np.min([env.max_episode_duration(),args.cap_fast_forward_at]))/args.samplestep))\n if rank == 0:\n print('Depending on values of --samplestep and --cap_fast_forward_at, running %d episodes.'%n_episodes,flush=True)\n \n # split episodes and lines for parallelization\n line_splits = np.array_split(LINES2ATTACK,size)\n \n for episode in range(n_episodes):\n \n for idx_line_split,line_split in enumerate(line_splits):\n \n if rank == idx_line_split or not args.use_parallel:\n\n for line_to_disconnect in line_split:\n \n # shuffle up the chronics\n _ = env.chronics_handler.shuffle(shuffler=lambda x: x[np.arange(len(x))])\n \n # traverse all chronics\n for chronic in range(env.chronics_handler._real_data.chronics_used.size): # loop across all chronics\n \n dst_step = np.min([episode * args.samplestep + random.randint(0, args.samplestep), env.max_episode_duration()-1]).item() # a random sampling every 6 hours\n \n if not args.supress_int_logs:\n print('\\n\\n' + '*' * 50 + '\\nScenario[%s]: at step[%d], disconnect line-%d(from bus-%d to bus-%d]' % (\n env.chronics_handler.get_name(), dst_step, line_to_disconnect,\n env.line_or_to_subid[line_to_disconnect], env.line_ex_to_subid[line_to_disconnect]),flush=True)\n \n # to the destination time-step\n _ = env.reset()\n env.fast_forward_chronics(dst_step - 1)\n if not env.done:\n # episode survived fast forwarding; now disconnect line\n obs 
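hash_action fingerprints an action by hashing the raw bytes of its integer vector, which lets the teacher count how often the same remedial action recurs across chronics. The counting pattern in isolation:

from hashlib import sha1
import numpy as np

def hash_vect(vec):
    # Identical vectors always produce identical digests.
    return sha1(vec.astype(int).data.tobytes()).hexdigest()

buffer, counts = {}, {}
for act in (np.array([0, 1, 0]), np.array([0, 1, 0]), np.array([1, 0, 0])):
    h = hash_vect(act)
    if h in counts:
        counts[h] += 1   # seen before: bump the counter
    else:
        buffer[h] = act  # first sighting: keep the vector itself
        counts[h] = 1
print(sorted(counts.values()))  # [1, 2]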
= env.get_obs()\n new_line_status_array = np.zeros(obs.rho.shape).astype(int)\n new_line_status_array[line_to_disconnect] = -1\n action = env.action_space({\"set_line_status\": new_line_status_array})\n obs, reward, done, _ = env.step(action)\n if done:\n # disconnecting the line killed the episode\n continue\n else:\n # episode died during fast forwarding\n continue\n \n if obs.rho.max() < 1:\n # not necessary to do a dispatch\n continue\n else:\n # search a greedy action\n action = topology_search(env,line_to_disconnect,episode)\n obs_, reward, done, _ = env.step(action)\n retval = save_to_buffer(env,action,line_to_disconnect,episode,chronic,dst_step)\n \n # save to disk \n if (episode+1) % args.save_every == 0:\n # truncated (used for agents)\n save_buffer_to_disk(env,line_to_disconnect,episode,args.prefix,args.topn)\n # full (for your records) - 1e+6 is a large number\n save_buffer_to_disk(env,line_to_disconnect,episode,args.prefix+'/full',1e+6) \n \n \n # # synchronization point for parallel\n # if args.use_parallel:\n # comm.Barrier()","repo_name":"Rachelmy/L2RPN_2023","sub_path":"Simulations.py","file_name":"Simulations.py","file_ext":"py","file_size_in_byte":11901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21519593296","text":"# -*-coding:Utf-8 -*\n\nfrom game import Game\n\n\nwidth = \"\"\nheight = \"\"\nstart = \"no\"\nwhile start.lower() == \"no\":\n\twhile type(width) is not int or int(width) > 50 or int(width) < 5:\n\t\twidth = input('How wide do you want your maze to be? Choose a number between 5 and 50: \\n > ')\n\t\twidth = int(width)\n\n\twhile type(height) is not int or int(height) > 20 or int(height) < 5:\n\t\theight = input('And what about your maze\\'s height? Once again, something between 5 and 20: \\n > ')\n\t\theight = int(height)\n\n\tgame = Game(height, width)\n\n\tprint(game)\n\n\tstart = input('That\\'s the maze game you\\'re going to play. You good with that? Say yes or no. \\n >')\n\ngame.initiate()\nprint(game)\nprint('Use your arrows to control your robot. 
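Disconnecting a line in grid2op is encoded as a status vector with -1 at the target line and 0 everywhere else, exactly as the loop above builds it. Minimal illustration:

import numpy as np

n_line = 5
new_line_status = np.zeros(n_line, dtype=int)
new_line_status[2] = -1  # -1 forces line 2 open; 0 leaves a line untouched
print(new_line_status)   # [ 0  0 -1  0  0]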
\\n Use w to go up \\n Use s to go down \\n Use a to go left \\n une d to go right.')\n\nwhile not game.over:\n\tmove = input('>')\n\tgame.analyse(move)\n\tprint(game)\n\nprint('You\\'ve won!')\n\n\n\n","repo_name":"gastrid/Maze","sub_path":"starter.py","file_name":"starter.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22507353149","text":"\n###########################\n# A Modifyer, that takes a dataset and adds uniform noise to it...\n\nimport torch\nfrom DataModifyers.utility import apply\n\ndef rollingAverage(windowSize,DataSet,windowDim=-1):\n \n window = torch.nn.Conv1d(in_channels = DataSet.Dimension(),\n out_channels = DataSet.Dimension(),\n kernel_size = windowSize)\n window.weight.data = torch.full_like(window.weight.data,1.0/float(windowSize))\n\n return apply(window,DataSet,\" rollingAveraged WinSize: \"+str(windowSize))\n","repo_name":"Arn-BAuA/TimeSeriesAEBenchmarkSuite","sub_path":"DataModifyers/smooth.py","file_name":"smooth.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33232566068","text":"\"\"\"\nFile: compute_interest.py\n-------------------------\nAdd your comments here.\n\"\"\"\n\n\ndef main():\n \"\"\"\n You should write your code for this program in this function.\n Make sure to delete the 'pass' line before starting to write\n your own code. You should also delete this comment and replace\n it with a better, more descriptive one.\n \"\"\"\n initial_balance = float(input('Initial balance: '))\n s_year = int(input('Start Year: '))\n s_month = int(input('Start month: '))\n e_year = int(input('End Year: '))\n if e_year >= s_year:\n e_month = int(input('End month: '))\n s_date = s_year * 13 + s_month\n e_date = e_year * 13 + e_month\n if e_date > s_date:\n int_rate = float(input('Interest rate (0 to quit): '))\n if int_rate != 0:\n while int_rate != 0:\n\n date_balance = s_date\n balance_to_date = initial_balance\n\n for i in range(s_date,e_date+1):\n if date_balance % 13 != 0:\n print('Year ' + str(date_balance // 13) + ', month ' + str(date_balance % 13) + ' balance: ' + str(balance_to_date))\n balance_to_date *= (int_rate + 1)\n date_balance += 1\n\n int_rate = float(input('Interest rate (0 to quit): '))\n\n else:\n date_error()\n\n else:\n date_error()\n\n\ndef date_error():\n print('Starting date needs to be before ending date.')\n\n\n# This provided line is required at the end of a Python file\n# to call the main() function.\nif __name__ == '__main__':\n main()\n","repo_name":"aktivx/CS106A-Python-raw","sub_path":"AS2-compute_interest.py","file_name":"AS2-compute_interest.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24477253771","text":"#!/usr/bin/env python3\n\ni = [x.strip() for x in open(\"day04.txt\", \"r\").readlines()]\n\n\nclass bingo:\n def __init__(self, board):\n self.board = board\n self.mask = [[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\n\n def mark_ball(self,number):\n for count, board_line in enumerate(self.board):\n for count2, board_number in enumerate(board_line):\n #print(f\"{count2}x{board_number}\")\n if int(board_number) == int(number):\n #print(\"setting mask value\")\n self.mask[count][count2] = 1\n \n def calculate_is_winner(self):\n winner = False\n for mask_line in self.mask:\n if sum(mask_line) == 
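The maze starter's prompt loop calls int(width) before checking the reply, so any non-numeric answer raises ValueError and kills the game. A sketch of a tolerant prompt (ask_int is an illustrative helper, not in the original; the calls are commented out so the snippet does not block):

def ask_int(prompt, lo, hi):
    # Re-ask until the reply parses as an int inside [lo, hi].
    while True:
        try:
            value = int(input(prompt))
        except ValueError:
            continue
        if lo <= value <= hi:
            return value

# width = ask_int('How wide do you want your maze to be? (5-50): ', 5, 50)
# height = ask_int("And what about your maze's height? (5-20): ", 5, 20)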
5:\n winner = True\n # check the columns\n for x in range(5):\n if sum([self.mask[y][x] for y in range(5)]) == 5:\n winner = True\n return winner\n \n def calculate_value(self):\n sum = 0\n for line_number, line in enumerate(self.mask):\n for line_number2, line_value in enumerate(line):\n if line_value == 0:\n sum += int(self.board[line_number][line_number2])\n return sum\n \n \nballs = i[0].split(\",\")\n\nboards = []\nline_numbers = []\ncount = 0\nfor line in i[1:]:\n #print(count)\n #print(f\"{line}\")\n count +=1\n if count == 1:\n continue\n \n line_numbers.append(line.split())\n \n if count == 6:\n boards.append(bingo(line_numbers.copy()))\n \n count = 0\n line_numbers = []\n\n\n\nwinner = None\nfinal_ball = None\n\nfor item in balls:\n print(item)\n for board_id, board in enumerate(boards):\n if not winner:\n board.mark_ball(item)\n if board.calculate_is_winner() and not winner:\n print(\"Winner is board {}\".format(board_id))\n winner = board_id\n final_ball = item\n \nsum = boards[winner].calculate_value()\nprint(f\"Board sum is: {sum}\")\nprint(f\"Final ball is: {final_ball}\")\nprint(f\"Final ball value is: {int(final_ball)*int(final_ball)}\")\n","repo_name":"sbrinkerhoff/advent-of-code-2021","sub_path":"day04-python/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15899341332","text":"from flask import Blueprint, render_template, current_app, abort, g, \\\n url_for, request, session, flash\nfrom galatea.tryton import tryton\nfrom flask_babel import gettext as _, lazy_gettext\nfrom flask_paginate import Pagination\nfrom flask_wtf import Form\nfrom wtforms import BooleanField, DateField, DateTimeField, FloatField, \\\n IntegerField, SelectField, TextAreaField, TextField, PasswordField, \\\n validators\nfrom htmlmin.decorator import htmlmin\n\nsurvey = Blueprint('survey', __name__, template_folder='templates')\n\nDISPLAY_MSG = lazy_gettext('Displaying <b>{start} - {end}</b> of <b>{total}</b>')\n\nGALATEA_WEBSITE = current_app.config.get('TRYTON_GALATEA_SITE')\nLIMIT = current_app.config.get('TRYTON_PAGINATION_SURVEY_LIMIT', 20)\n\nSurvey = tryton.pool.get('survey.survey')\n\nSURVEY_FIELD_NAMES = ['name', 'code', 'slug', 'esale_description']\nSURVEY_EXCLUDE_DATA = ['csrf_token']\n\n\nclass BaseForm(Form):\n \"\"\" A Base Form\"\"\"\n\n def __init__(self, *args, **kwargs):\n Form.__init__(self, *args, **kwargs)\n\n def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n return True\n\n def reset(self):\n pass\n\n\n@survey.route(\"/<slug>\", methods=[\"GET\", \"POST\"], endpoint=\"survey\")\n@tryton.transaction()\n@htmlmin\ndef survey_detail(lang, slug):\n '''Survey Detail'''\n\n domain = [\n ('slug', '=', slug),\n ('websites', 'in', [GALATEA_WEBSITE]),\n ('active', '=', True),\n ('esale', '=', True),\n ]\n if not session.get('logged_in'):\n domain.append(('login', '=', False))\n if not session.get('manager'):\n domain.append(('manager', '=', False))\n\n surveys = Survey.search(domain, limit=1)\n\n if not surveys:\n abort(404)\n survey, = surveys\n\n survey_form = Survey.galatea_survey_form(survey)\n total_steps = len(survey_form.keys())\n\n for step in survey_form:\n for field in survey_form[step]['fields']:\n name = field['name']\n label = field['label']\n description = field['help']\n default_value = field.get('default_value', None)\n \n field_validators = []\n if field['required']:\n 
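calculate_is_winner checks columns with a nested comprehension over fixed indices; zip(*mask) yields the columns directly, so rows and columns can share a single test. Sketch:

mask = [[1, 1, 1, 1, 1],
        [0, 1, 0, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 1, 0, 0, 0]]

# zip(*mask) transposes the board, yielding its columns.
lines = mask + [list(col) for col in zip(*mask)]
print(any(sum(line) == 5 for line in lines))  # True (row 0 and column 1)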
field_validators.append(validators.Required())\n if field['email']:\n field_validators.append(validators.Email())\n if field['url']:\n field_validators.append(validators.URL())\n\n if field['type_'] == 'boolean':\n setattr(BaseForm, name, BooleanField(\n label, description=description, default=default_value,\n validators=field_validators))\n elif field['type_'] == 'date':\n setattr(BaseForm, name, DateField(\n label, description=description, default=default_value,\n validators=field_validators))\n elif field['type_'] == 'datetime':\n setattr(BaseForm, name, DateTimeField(\n label, description=description, default=default_value,\n validators=field_validators))\n elif field['type_'] == 'float':\n setattr(BaseForm, name, FloatField(\n label, description=description, default=default_value,\n validators=field_validators))\n elif field['type_'] == 'integer':\n setattr(BaseForm, name, IntegerField(\n label, description=description, default=default_value,\n validators=field_validators))\n elif field['type_'] == 'numeric':\n setattr(BaseForm, name, IntegerField(\n label, description=description, default=default_value,\n validators=field_validators))\n elif field['type_'] == 'selection':\n choices = []\n for option in field['selection'].split('\\n'):\n choices.append(option.split(':'))\n setattr(BaseForm, name, SelectField(\n label, description=description, default=default_value,\n choices=choices, validators=field_validators))\n elif field['type_'] == 'char' and field['textarea']:\n setattr(BaseForm, name, TextAreaField(\n label, description=description, default=default_value,\n validators=field_validators))\n elif field['type_'] == 'char' and field['password']:\n setattr(BaseForm, name, PasswordField(\n label, description=description, default=default_value,\n validators=field_validators))\n else:\n setattr(BaseForm, name, TextField(\n label, description=description, default=default_value,\n validators=field_validators))\n\n breadcrumbs = [{\n 'slug': url_for('.surveys', lang=g.language),\n 'name': _('Forms'),\n }, {\n 'slug': url_for('.survey', lang=g.language, slug=survey.slug),\n 'name': survey.name,\n }]\n\n form = BaseForm(request.form)\n if form.validate_on_submit():\n data = {}\n for k, v in request.form.iteritems():\n if k not in SURVEY_EXCLUDE_DATA:\n data[k] = v\n result = Survey.save_data(survey, data)\n\n if result:\n # ok. render thanks template\n return render_template('survey-thanks.html',\n breadcrumbs=breadcrumbs,\n survey=survey,\n )\n else:\n flash(_('An error occured when save data. Not send form. 
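The survey view builds its form at runtime by attaching one WTForms field per survey question to BaseForm with setattr. The mechanism without Flask (plain tuples stand in for the Field objects, and the specs are illustrative):

class BaseForm:
    """Stand-in for the flask_wtf Form the view extends at runtime."""

specs = [{"name": "email", "label": "E-mail"},
         {"name": "age", "label": "Age"}]
for spec in specs:
    # The view does the same with TextField/SelectField instances.
    setattr(BaseForm, spec["name"], ("field", spec["label"]))

print(BaseForm.email, BaseForm.age)  # ('field', 'E-mail') ('field', 'Age')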
' \\\n 'Repeat or contact us'), 'danger')\n\n return render_template('survey.html',\n breadcrumbs=breadcrumbs,\n survey=survey,\n total_steps=total_steps,\n survey_form=survey_form,\n form=form,\n )\n\n@survey.route(\"/\", endpoint=\"surveys\")\n@tryton.transaction()\ndef survey_list(lang):\n '''Surveys'''\n\n try:\n page = int(request.args.get('page', 1))\n except ValueError:\n page = 1\n\n domain = [\n ('websites', 'in', [GALATEA_WEBSITE]),\n ('active', '=', True),\n ('esale', '=', True),\n ]\n if not session.get('logged_in'):\n domain.append(('login', '=', False))\n if not session.get('manager'):\n domain.append(('manager', '=', False))\n\n total = Survey.search_count(domain)\n offset = (page-1)*LIMIT\n\n order = [\n ('id', 'DESC'),\n ]\n surveys = Survey.search_read(\n domain, offset, LIMIT, order, SURVEY_FIELD_NAMES)\n\n pagination = Pagination(\n page=page, total=total, per_page=LIMIT, display_msg=DISPLAY_MSG, bs_version='3')\n\n #breadcumbs\n breadcrumbs = [{\n 'slug': url_for('.surveys', lang=g.language),\n 'name': _('Forms'),\n }]\n\n return render_template('surveys.html',\n breadcrumbs=breadcrumbs,\n pagination=pagination,\n surveys=surveys,\n )\n","repo_name":"NaN-tic/flask-galatea_survey","sub_path":"survey.py","file_name":"survey.py","file_ext":"py","file_size_in_byte":7032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17548763165","text":"#import random\nfrom pomocne_fce_knp import *\n\nODDELOVAC = \"=\" * 50\nMOZNOSTI = (\"kamen\", \"nuzky\", \"papir\")\n\n#úvod, který importuju do main()\ndef vypsani_uvitani():\n ODDELOVAC = \"=\" * 50\n MOZNOSTI = (\"kamen\", \"nuzky\", \"papir\")\n print(\n f\"{ODDELOVAC}\",\n f\"{'Pojď si zahrát hru':^50}\",\n f\"{','.join(MOZNOSTI).upper():^50}\",\n f\"{ODDELOVAC}\",\n sep=\"\\n\"\n )\n# hlavní kod hry, přes definované fce, které importuji do main()\ndef loop_hra():\n skore_hrac = 0\n skore_computer = 0\n while True:\n hrac_volba = zadani_hrac()\n ukonceni_hry(hrac_volba)\n overeni_zadani(hrac_volba, MOZNOSTI)\n computer_volba = zadani_computer()\n print(f\"Počítač volí: {computer_volba}\")\n print(f\"{ODDELOVAC}\")\n vysledek = vyhodnoceni(hrac_volba, computer_volba)\n print(f\"{vysledek: ^50}\".upper())\n print(f\"{ODDELOVAC}\")\n skore_hrac, skore_computer = pricteni_bodu(vysledek, skore_hrac, skore_computer) # uložim si do dvou proměnných\n vypsani_skore(skore_hrac, skore_computer)\n print(f\"{'Další hra': ^50}\")\n print(f\"{ODDELOVAC}\")\n\n\n","repo_name":"vrbato/kamen_nuzky_papir","sub_path":"kod_knp.py","file_name":"kod_knp.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1372762895","text":"import argparse\nimport boto3\nimport os\n\n# Input\nparser = argparse.ArgumentParser(description=\"Upload a file.\")\nparser.add_argument(\"bucket_name\", metavar=\"B\", type=str)\nparser.add_argument(\"file_path\", metavar=\"P\", type=str)\nargs = parser.parse_args()\n\n# Constants\nBUCKET_NAME = args.bucket_name\nFILE_NAME = os.path.normpath(args.file_path)\nRESOURCES_DIR = \"resources\"\nINPUT_DIR = os.path.join(RESOURCES_DIR, \"uploads\")\nINPUT_FILE = os.path.join(INPUT_DIR, FILE_NAME)\n\n\nif __name__ == \"__main__\":\n\n # Create output directory if it doesn't exist\n if os.path.exists(INPUT_DIR) is False:\n os.mkdir(INPUT_DIR)\n\n # Upload the file\n s3 = boto3.resource(\"s3\")\n s3.meta.client.upload_file(INPUT_FILE, BUCKET_NAME, 
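survey_list pages its query with offset = (page - 1) * LIMIT, the standard offset/limit arithmetic. Worked numbers (page_slice is an illustrative helper):

LIMIT = 20

def page_slice(page, total):
    # Page n covers rows [(n-1)*LIMIT, n*LIMIT); the last page may be short.
    offset = (page - 1) * LIMIT
    return offset, min(LIMIT, max(0, total - offset))

print(page_slice(1, 45))  # (0, 20)
print(page_slice(3, 45))  # (40, 5)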
FILE_NAME)\n","repo_name":"RadiantTechLtd/Guides","sub_path":"utility/AWS_S3_Buckets/scripts/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31493147587","text":"\"\"\"\n8. Name Search\nIf you have downloaded the source code you will find the following files in the Chapter 07\nfolder:\n•\t GirlNames.txt This file contains a list of the 200 most popular names given to girls\nborn in the United States from the year 2000 through 2009.\n•\t BoyNames.txt This file contains a list of the 200 most popular names given to boys\nborn in the United States from the year 2000 through 2009.\nWrite a program that reads the contents of the two files into two separate lists. The user\nshould be able to enter a boy’s name, a girl’s name, or both, and the application will display\nmessages indicating whether the names were among the most popular.\n(You can access the Premium Companion Website at www.pearsonglobaleditions.com/gaddis.)\n\n\n@author Sharaf Qeshta\n\"\"\"\n\n\ndef main():\n boys_names = []\n girls_names = []\n try:\n boys_file = open(\"BoyNames.txt\")\n for line in boys_file:\n boys_names.append(line.strip())\n girls_file = open(\"GirlNames.txt\")\n for line in girls_file:\n girls_names.append(line)\n except Exception as error:\n print(error)\n\n name = input(\"Enter a name: \")\n if name in boys_names or name in girls_names:\n print(f\"the name {name} exist among the most popular names\")\n else:\n print(f\"the name {name} is not exist among the most popular names\")\n\n\nmain()\n","repo_name":"sharaf-qeshta/starting_out_with_python_exercises_solutions","sub_path":"Chapter_07/Exercise_07_08/Exercise_07_08.py","file_name":"Exercise_07_08.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1423894419","text":"from django.db.models import Count\nfrom rest_framework import generics, filters\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom pp5_gamer_verse_drf_api.permissions import IsOwnerOrReadOnly\nfrom .models import Profile\nfrom .serializers import ProfileSerializer\n\n# DRF-API walkthrough used to get guidance on creating profile views\n# Original code has been modified to suit project purpose\n\n\nclass ProfileList(generics.ListAPIView):\n \"\"\"\n List all profiles\n \"\"\"\n queryset = Profile.objects.annotate(\n posts_count=Count('owner__post', distinct=True),\n reviews_count=Count('owner__post', distinct=True),\n followers_count=Count('owner__followed', distinct=True),\n following_count=Count('owner__following', distinct=True)\n ).order_by('-created_on')\n serializer_class = ProfileSerializer\n filter_backends = [\n filters.OrderingFilter,\n DjangoFilterBackend,\n ]\n filterset_fields = [\n 'owner__following__followed__profile',\n 'owner__followed__owner__profile'\n ]\n ordering_fields = [\n 'posts_count',\n 'reviews_count'\n 'followers_count',\n 'following_count',\n 'owner__following__created_on',\n 'owner__followed__created_on',\n ]\n\n\nclass ProfileDetail(generics.RetrieveUpdateAPIView):\n '''\n Retrieve or update profile fields if owner\n '''\n permission_classes = [IsOwnerOrReadOnly]\n queryset = Profile.objects.annotate(\n posts_count=Count('owner__post', distinct=True),\n reviews_count=Count('owner__post', distinct=True),\n followers_count=Count('owner__followed', distinct=True),\n following_count=Count('owner__following', 
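In the name-search record, only the boys' file is read with .strip(), so every girl's name keeps its trailing newline and a lookup for "Emma" can never match "Emma\n". Stripping both files fixes it; io.StringIO stands in for GirlNames.txt so the sketch is self-contained:

import io

girls_file = io.StringIO("Emma\nOlivia\nSophia\n")
# Strip every line, or the stored names carry a trailing newline.
girl_names = {line.strip() for line in girls_file}

print("Emma" in girl_names)  # True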
distinct=True)\n ).order_by('-created_on')\n serializer_class = ProfileSerializer\n","repo_name":"Jbachtiger/ci-pp5-gamer-verse-drf-api","sub_path":"profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30009851911","text":"import urllib.parse\n\nfrom xcalar.container.connectors.webhdfs import WebHDFSConnector\nimport xcalar.container.target.base as target\n\n\n@target.register(\n \"HttpFs\", is_available=WebHDFSConnector.is_available(kerberos=False))\n@target.param(\"httpfsnodes\", \"Semi-colon delimited list of httpfs nodes\")\nclass HttpfsNoKerberosTarget(target.BaseTarget):\n \"\"\"\n Connect to a HDFS cluster via HttpFs.\n\n Httpfs node to be specified in the form\n <FQDN of httpfs node>:<port number>.\n <port number> if omitted,defaults to 14000.\n\n Multiple httpfs nodes can be specified with semi-colon delimiter. E.g.\n\n http://httpfsnode1.example.com;http://httpfsnode2.example.com:14001;http://httpfsnode3.example.com;...\n\n You may use https instead of http as well. The default is http if\n protocol is not provided.\n \"\"\"\n\n def __init__(self, name, path, httpfsnodes, **kwargs):\n super(HttpfsNoKerberosTarget, self).__init__(name)\n\n nodes = []\n for node in httpfsnodes.split(\";\"):\n parsed = urllib.parse.urlparse(node)\n protocol = \"http\" if parsed.scheme == \"\" else parsed.scheme\n hostname = parsed.netloc if parsed.netloc != \"\" else parsed.path\n if hostname == \"\":\n continue\n tmp = hostname.split(\":\")\n if (len(tmp) > 1):\n fqdn = tmp[0]\n port = int(tmp[1])\n else:\n fqdn = hostname\n port = 14000\n\n nodes.append(\"%s://%s:%d\" % (protocol, fqdn, port))\n\n nn = \";\".join(nodes)\n self.connector = WebHDFSConnector(nn, kerberos=False, keytab=None)\n\n def is_global(self):\n return True\n\n def get_files(self, path, name_pattern, recursive, **user_args):\n return self.connector.get_files(path, name_pattern, recursive)\n\n def open(self, path, opts):\n return self.connector.open(path, opts)\n","repo_name":"varlogtim/xcalar","sub_path":"src/bin/sdk/xpu/xcalar/container/target/httpfsnokerberos.py","file_name":"httpfsnokerberos.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29599051338","text":"from stable_baselines3 import PPO, SAC\nfrom stable_baselines3.common.vec_env import DummyVecEnv\nfrom stable_baselines3.common.env_util import make_vec_env\n\nimport tactile_gym.envs\nfrom tactile_gym.sb3_helpers.params import import_parameters\nfrom tactile_gym.sb3_helpers.custom.custom_torch_layers import CustomCombinedExtractor\n\nif __name__ == \"__main__\":\n\n algo_name = 'ppo'\n # algo_name = 'sac'\n\n # show gui can only be enabled for n_envs = 1\n # if using image observation SubprocVecEnv is needed to replace DummyVecEnv\n # as pybullet EGL rendering requires separate processes to avoid silent\n # rendering issues.\n seed = 1\n n_envs = 1\n show_gui = True\n\n # env_id = \"edge_follow-v0\"\n # env_id = \"surface_follow-v0\"\n # env_id = \"surface_follow-v1\"\n # env_id = \"object_roll-v0\"\n env_id = \"object_push-v0\"\n # env_id = \"object_balance-v0\"\n\n env_args, rl_params, algo_params = import_parameters(env_id, algo_name)\n env_args['env_params']['show_gui'] = show_gui\n\n env = make_vec_env(\n env_id,\n env_kwargs=env_args,\n n_envs=n_envs,\n seed=seed,\n vec_env_cls=DummyVecEnv,\n )\n\n algo_params = {\n 
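The HttpFs target accepts bare hostnames, host:port pairs, or full URLs, defaulting to http and port 14000. Its normalization logic condenses to roughly this:

import urllib.parse

def normalize(node, default_port=14000):
    parsed = urllib.parse.urlparse(node)
    scheme = parsed.scheme or "http"         # default protocol
    hostname = parsed.netloc or parsed.path  # bare names land in .path
    host, _, port = hostname.partition(":")
    return "%s://%s:%d" % (scheme, host, int(port) if port else default_port)

print(normalize("node1.example.com"))    # http://node1.example.com:14000
print(normalize("https://node2:14001"))  # https://node2:14001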
\"policy_kwargs\": {\n \"features_extractor_class\": CustomCombinedExtractor,\n \"features_extractor_kwargs\": {\n \"cnn_output_dim\": 128,\n \"mlp_extractor_net_arch\": [64, 64],\n },\n \"net_arch\": [dict(pi=[128, 128], vf=[128, 128])],\n },\n }\n\n if algo_name == \"ppo\":\n model = PPO(\"MultiInputPolicy\", env, **algo_params, verbose=1)\n\n elif algo_name == \"sac\":\n model = SAC(\"MultiInputPolicy\", env, **algo_params, verbose=1)\n\n model.learn(total_timesteps=100000)\n\n n_eval_episodes = 10\n for i in range(n_eval_episodes):\n obs = env.reset()\n done = False\n while not done:\n action, _states = model.predict(obs, deterministic=True)\n obs, reward, done, info = env.step(action)\n env.render()\n\n env.close()\n","repo_name":"dexterousrobot/tactile_gym","sub_path":"tactile_gym/sb3_helpers/simple_sb3_example.py","file_name":"simple_sb3_example.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"11495423886","text":"#! usr/bin/python3\n# 1000-digit\ntab = []\nsuma_wierszy = 0\narray = []\nthirteen = {} # dictionary which storage thirteen adjacent digits\ns = ''\nkey = ''\nproduct = 1\ncounter = 0\nfor wiersz in open('digit.txt'): # odczytywanie wiersz po wierszy pliku digit.txt\n tab.append(wiersz) # i zapisanie tego do tablicy tab\n suma_wierszy = suma_wierszy + 1\nliczba_elementow = len(wiersz)\nfor x in range(suma_wierszy): # połączenie elementów tablicy tab w pojedyncze elementy w tablicy array\n for y in range(liczba_elementow - 1):\n array.append(tab[x][y])\nprint(array)\n# kod w potrojnym cudzyslowie pozwala zlaczyc wszystkie elementy tablicy w jeden\n\"\"\"array = s.join(array)\nprint(array)\nprint(len(array))\"\"\"\n# zliczanie kolejnych elementów tablicy array\nfor x in range(len(array)):\n if x > len(array) - 13:\n break\n while len(key) < 13:\n key = key + str(array[x])\n product = product * int(array[x])\n x = x + 1\n thirteen[key] = product\n key = ''\n product = 1\nprint(max(thirteen.values()))","repo_name":"jedrzejmatuszak/Project-Euler","sub_path":"PE8 - Largest product in a series.py","file_name":"PE8 - Largest product in a series.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39852467936","text":"import json\n\n\ntotal_items = 140\n\ndef get_current_stock(filepath):\n # takes filepath relative to main file\n # returns 1 * n 2D array with nth place holding product\n # of db_letter n\n\n print('fetching current stocks ...')\n current_stock = [[0] for i in range(total_items)]\n with open(filepath) as file:\n stock = json.load(file)\n\n for product in stock:\n db_letter = product['kind']['db_letter']\n current_stock[db_letter][0] += product['amount']\n\n print('current stock ...')\n print(current_stock)\n print('completed fetching stock :)')\n\n return current_stock\n","repo_name":"jayam04/simcoscripts","sub_path":"scripts/quantity.py","file_name":"quantity.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7919074269","text":"#!/usr/bin/env python\n\nfrom http import HTTPStatus\nfrom http.server import HTTPServer, SimpleHTTPRequestHandler\nfrom os import system, path\nfrom rich import pretty, inspect, print as pp\nimport json\nimport socket\nimport ssl\nimport sys\nimport uuid\n\npretty.install()\n\n\ndef usage():\n print(f\"Usage: python {sys.argv[0]} 
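The evaluation loop at the end of the SB3 example relies only on the reset()/step() contract: step returns (obs, reward, done, info) and the loop runs until done. A stub environment makes that contract runnable without gym or stable-baselines3 installed (StubEnv is purely illustrative):

class StubEnv:
    # Minimal object honouring the reset()/step() protocol the loop uses.
    def __init__(self):
        self.t = 0
    def reset(self):
        self.t = 0
        return 0.0
    def step(self, action):
        self.t += 1
        return float(self.t), 1.0, self.t >= 3, {}  # obs, reward, done, info

env = StubEnv()
obs, done = env.reset(), False
while not done:
    action = 0  # model.predict(obs) would supply this in the real script
    obs, reward, done, info = env.step(action)
print(obs)  # 3.0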
[http|https]\")\n exit(42)\n\n\nif len(sys.argv) != 2:\n usage()\nif sys.argv[1] == \"https\":\n SSL = True\nelif sys.argv[1] == \"http\":\n SSL = False\nelse:\n usage()\n\nif SSL:\n proto = \"https\"\n port = 50443\n if not path.exists(\"key.pem\"):\n system(\n \"openssl req -nodes -x509 -newkey rsa:4096 -days 365 \"\n \"-keyout key.pem -out cert.pem -subj '/CN=hellothere'\"\n )\nelse:\n proto = \"http\"\n port = 50080\n\n\nclass MyHTTPRequestHandler(SimpleHTTPRequestHandler):\n def handle_ALL(self):\n # Dump our logs\n data = dict()\n data[\"ip\"] = self.client_address[0]\n data[\"port\"] = self.client_address[1]\n data[\"date\"] = self.date_time_string()\n data[\"method\"] = self.command\n data[\"path\"] = self.path\n data[\"version\"] = self.request_version\n data.update(dict(self.headers))\n\n\n if 'Content-Length' in self.headers:\n # from rich import inspect as i\n # import ipdb; ipdb.set_trace()\n data[\"body\"] = self.rfile.read(int(self.headers['Content-Length'])).decode(\"utf-8\")\n print(\"body: \", data[\"body\"])\n\n with open(f\"dump/{proto}/{uuid.uuid4().hex}.json\", \"w\") as f:\n f.write(json.dumps(data, indent=2))\n\n # Handle a minimalist answer\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(b\"ok\")\n return\n\n def handle_one_request(self):\n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = \"\"\n self.request_version = \"\"\n self.command = \"\"\n self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)\n return\n if not self.raw_requestline:\n self.close_connection = True\n return\n if not self.parse_request():\n return\n mname = \"do_\" + self.command\n self.handle_ALL()\n self.wfile.flush()\n except socket.timeout as e:\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = True\n return\n\n\nhttpd = HTTPServer((\"127.0.0.1\", port), MyHTTPRequestHandler)\nif SSL:\n httpd.socket = ssl.wrap_socket(\n httpd.socket, keyfile=\"key.pem\", certfile=\"cert.pem\", server_side=True\n )\n\nprint(f\"Server running on {proto}://127.0.0.1:{port}/\")\nhttpd.serve_forever()","repo_name":"laluka/pypotomux","sub_path":"potomiel-http.py","file_name":"potomiel-http.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"73014882505","text":"from openpyxl import Workbook\nfrom scrapy.http import FormRequest\nimport csv\nimport os.path\nimport scrapy\nimport glob\nfrom decouple import config\n\n\nclass ItemsSpider(scrapy.Spider):\n name = 'items'\n allowed_domains = ['cs.trade']\n start_urls = ['http://cs.trade/']\n\n def start_requests(self):\n frmdata = {}\n url = \"https://cs.trade/loadBotInventory?order_by=price_desc&bot=all&_=1648140077424\"\n headers = {\n 'Connection': 'keep-alive',\n 'sec-ch-ua': '\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"98\", \"Google Chrome\";v=\"98\"',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'sec-ch-ua-mobile': '?0',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',\n 'sec-ch-ua-platform': '\"Linux\"',\n 'Origin': 'https://cs.trade',\n 'Sec-Fetch-Site': 'same-site',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Dest': 'empty',\n 'Referer': 'https://cs.trade/',\n 'Accept-Language': 'en-US,en;q=0.9,vi;q=0.8',\n 'cookie': config(\"COOKIE\")\n }\n\n yield FormRequest(url, callback=self.parse, formdata=frmdata, 
headers=headers)\n\n def parse(self, response):\n items = response.json()[\"inventory\"]\n for item in items:\n if item[\"app_id\"] == self.appId:\n yield {\n \"name\": item[\"market_hash_name\"],\n \"price\": item[\"price\"],\n }\n\n def close(self, reason):\n csv_file = max(glob.iglob('*csv'), key=os.path.getctime)\n\n wb = Workbook()\n ws = wb.active\n\n with open(csv_file, 'r') as f:\n for row in csv.reader(f):\n ws.append(row)\n\n wb.save(csv_file.replace('.csv', '') + '.xlsx')\n","repo_name":"toannguyen3105/pdt-crawler-cstrade","sub_path":"items_list_spider/items_list_spider/spiders/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9724352094","text":"import os\n\nfrom apscheduler.triggers.cron import CronTrigger\nfrom japronto import Application\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nimport aiodocker\n\nDFG_URL_CLONE = os.environ['DFG_URL_CLONE']\nDFG_PATH_DOCKERFILE = os.getenv('DFG_PATH_DOCKERFILE', 'Dockerfile')\nDFG_IMAGE_TAG = os.environ['DFG_IMAGE_TAG']\nDFG_AUTH_BASE64 = os.getenv('DFG_AUTH_BASE64', None)\n\nasync def logging(data):\n sha256 = None\n async for item in data:\n if 'stream' in item:\n if item['stream'] != '\\n':\n print(item['stream'].rstrip())\n elif 'status' in item:\n print(item['status'])\n elif 'aux' in item:\n if 'ID'in item['aux']:\n sha256 = item['aux']['ID']\n print(sha256)\n if 'Digest'in item['aux']:\n sha256 = item['aux']['Digest']\n print(sha256)\n elif 'error' in item:\n print(item['error'])\n else:\n print(item)\n return sha256\n\n\nasync def build_from_git():\n try:\n repo_url = DFG_URL_CLONE.split('@')[-1]\n print(f\"Clone git {repo_url}\")\n except Exception as e:\n pass\n docker = aiodocker.Docker()\n build = await docker.images.build(\n remote=DFG_URL_CLONE,\n path_dockerfile=DFG_PATH_DOCKERFILE,\n tag=DFG_IMAGE_TAG,\n quiet=False,\n buildargs=dict(**os.environ),\n nocache=True,\n pull=True,\n rm=True,\n forcerm=False,\n labels=None,\n stream=True\n )\n\n await logging(build)\n push = await docker.images.push(DFG_IMAGE_TAG, auth=DFG_AUTH_BASE64, stream=True)\n await logging(push)\n await docker.close()\n\n\nasync def connect_scheduler():\n scheduler = AsyncIOScheduler(timezone=\"UTC\")\n scheduler.add_job(build_from_git, CronTrigger.from_crontab(os.getenv('DFG_CRON', '0 * * * *')), max_instances=1)\n scheduler.start()\n\n\nasync def index(request):\n return request.Response(json='ok')\n\n\napp = Application()\nif 'DFG_BUILD_ON_START' in os.environ:\n app.loop.run_until_complete(build_from_git())\napp.loop.run_until_complete(connect_scheduler())\nrouter = app.router\nrouter.add_route('/', index)\napp.run(host='0.0.0.0', port=8080, debug=True)","repo_name":"Negashev/dfg","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24045863761","text":"import dbconn\n\n\ndef getAll():\n conn = dbconn.get()\n\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"select amount, description, applied_on from expense\n order by applied_on desc limit 10\"\"\"\n )\n\n column_names = ['amount', 'description', 'applied_on']\n\n expenses = []\n for row in cursor.fetchall():\n # Convert row list to dictionary with column names as keys\n expense = dict(zip(column_names, row))\n expense['description'] = expense['description'].decode('utf-8')\n expenses.append(expense)\n\n 
return expenses\n\n\ndef groupByDate(expenses):\n    keyed_grouped_expenses = {}\n\n    for ungrouped_expense in expenses:\n        if ungrouped_expense['applied_on'] not in keyed_grouped_expenses:\n            keyed_grouped_expenses[ungrouped_expense['applied_on']] = []\n\n        keyed_grouped_expenses[\n            ungrouped_expense['applied_on']\n        ].append(ungrouped_expense)\n\n    # dict.items() returns a view in Python 3, so materialise it before sorting\n    grouped_expenses = list(keyed_grouped_expenses.items())\n    grouped_expenses.sort(reverse=True)\n\n    return grouped_expenses\n\n\ndef add(description, amount, applied_on):\n    conn = dbconn.get()\n    conn.cursor().execute(\n        \"\"\"insert into expense (amount, description, applied_on)\n        values (%s, %s, %s)\"\"\",\n        (amount, description, applied_on)\n    )\n    conn.commit()\n\n\ndef getCategories():\n    conn = dbconn.get()\n\n    cursor = conn.cursor()\n    cursor.execute(\n        \"\"\"\n        select\n            lower(split_part(description, ' ', 1)) category,\n            sum(amount) as total\n        from expense\n        group by lower(split_part(description, ' ', 1))\n        order by total desc\n        \"\"\"\n    )\n\n    column_names = ['category', 'total']\n\n    categories = []\n    for row in cursor.fetchall():\n        # Convert row list to dictionary with column names as keys\n        categories.append(dict(zip(column_names, row)))\n\n    return categories\n","repo_name":"simonbrahan/expenses","sub_path":"lib/expenses/expense.py","file_name":"expense.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"74945142985","text":"import random\n# GGM import Regular Expression, re\nimport re\nimport string\nimport sys\nfrom datetime import datetime, timedelta\nfrom locale import currency\n\nimport pytz\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models.functions.window import Lead\nfrom django.shortcuts import redirect, render\nfrom django.utils import timezone\n\nfrom Badges.enums import Event\nfrom Badges.events import register_event\nfrom Badges.models import CourseConfigParams, PlayerType\nfrom Instructors.constants import unlimited_constant\nfrom Instructors.lupaQuestion import CodeSegment, LupaQuestion, lupa_available\nfrom Instructors.models import (Answers, Challenges, ChallengesQuestions,\n                                DynamicQuestions, MatchingAnswers, Questions,\n                                StaticQuestions)\nfrom Instructors.questionTypes import (QuestionTypes, dynamicQuestionTypesSet,\n                                       questionTypeFunctions,\n                                       staticQuestionTypesSet)\nfrom Instructors.views.dynamicQuestionView import makeLibs\nfrom Instructors.views.utils import current_localtime, datetime_to_local\nfrom oneUp.ckeditorUtil import config_ck_editor\nfrom Students.models import (CalloutParticipants, DuelChallenges, Student,\n                             StudentAnswerHints, Teams, TeamChallenges, StudentPlayerType)\nfrom Students.views.utils import studentInitialContextDict\n\n\ndef remove_old_challenge_session_entries(session):\n    sessionitems = list(session.items())\n    for k, v in sessionitems:\n        # We want to make sure this really matches an attempt which we added.\n        # AttemptIds all have the form \"challenge:NUM@DATETIME\" where NUM is the actual\n        # challenge number and DATETIME is the actual date and time it was started.\n        parts = k.split(\"@\")\n        if len(parts) == 2:\n            challpart = parts[0]\n            datepart = parts[1]\n            if challpart[:10] == 'challenge:':\n                date = datetime.strptime(datepart, \"%m/%d/%Y %I:%M:%S %p\")\n                delta = datetime.utcnow() - date\n                if delta.days > 30:\n                    del session[k]\n\n\ndef makeSerializableCopyOfDjangoObjectDictionary(obj):\n    dict = obj.__dict__.copy()\n    # We remove the Django Status object from the dictionary to 
prevent serialization problems\n dict.pop(\"_state\", None)\n return dict\n\n\n@login_required\ndef ChallengeSetup(request):\n\n context_dict, currentCourse = studentInitialContextDict(request)\n student = context_dict['student']\n if 'currentCourseID' in request.session:\n\n questionObjects = []\n sessionDict = {}\n attemptId = ''\n \n if request.POST:\n if request.POST['challengeId']:\n ccp = CourseConfigParams.objects.get(courseID=currentCourse)\n context_dict['strongHintDeduction'] = ccp.weightStrongHint\n context_dict['weakHintDeduction'] = ccp.weightBasicHint\n context_dict['hintsUsed'] = ccp.hintsUsed\n context_dict['questionTypes'] = QuestionTypes\n\n challengeId = request.POST['challengeId']\n context_dict['challengeID'] = challengeId\n challenge = Challenges.objects.get(\n pk=int(request.POST['challengeId']))\n \n if \"duelID\" in request.POST:\n duel_id = request.POST['duelID']\n duel_challenge = DuelChallenges.objects.get(\n pk=int(duel_id))\n context_dict['challengeName'] = duel_challenge.duelChallengeName\n context_dict['isduration'] = True\n total_time = datetime_to_local(duel_challenge.acceptTime) + \\\n timedelta(minutes=duel_challenge.startTime) + timedelta(\n minutes=duel_challenge.timeLimit)\n remaing_time = total_time - current_localtime()\n difference_minutes = remaing_time.total_seconds()/60.0\n context_dict['testDuration'] = difference_minutes\n context_dict['isDuel'] = True\n context_dict['duelID'] = duel_id\n elif \"calloutPartID\" in request.POST:\n call_out_part_id = request.POST['calloutPartID']\n call_out_part = CalloutParticipants.objects.get(\n pk=int(call_out_part_id))\n context_dict['challengeName'] = call_out_part.calloutID.challengeID.challengeName\n context_dict['isduration'] = True\n time_left = (datetime_to_local(call_out_part.calloutID.endTime) - current_localtime()).total_seconds() / 60.0\n context_dict['testDuration'] = time_left\n context_dict['isCallout'] = True\n context_dict['calloutPartID'] = call_out_part_id\n else:\n context_dict['challengeName'] = challenge.challengeName\n if challenge.timeLimit == unlimited_constant:\n context_dict['isduration'] = False\n else:\n context_dict['isduration'] = True\n context_dict['testDuration'] = challenge.timeLimit\n context_dict['isDuel'] = False\n\n\n starttimestring = current_localtime().strftime(\"%m/%d/%Y %I:%M:%S %p\")\n context_dict['startTime'] = starttimestring\n\n attemptId = 'challenge:'+challengeId + '@' + starttimestring\n context_dict['attemptId'] = attemptId\n\n sessionDict['challengeId'] = challengeId\n if Challenges.objects.filter(challengeID=challengeId, courseID=currentCourse, isTeamChallenge=True).exists():\n sessionDict['teamLeader'] = checkTeamLeader(context_dict['student'], currentCourse)\n if not challenge.isGraded:\n context_dict['warmUp'] = 1\n else:\n context_dict['warmUp'] = 0\n\n # Checks if password was entered correctly\n if challenge.challengePassword != '':\n if 'password' not in request.POST or request.POST['password'] != challenge.challengePassword:\n return redirect('/oneUp/students/ChallengeDescription?challengeID=' + challengeId)\n\n# if challenge.challengeName == \"Parsons\":\n# context_dict['questionType'] = 'parsons'\n# context_dict['questionText'] = \"Construct a function by drag&dropping and reordering lines from the left to the right.The constructed function should return True if the parameter is True and return False otherwise.\"\n# return render(request,'Students/ChallengeSetup.html', context_dict)\n\n # GGM changed it so that it will now order by the question 
position\n                # this allows us to easily order by randomization in the future\n\n                if(challenge.isRandomized):\n                    # GGM this line is problematic for large data sets\n                    challenge_questions = ChallengesQuestions.objects.filter(\n                        challengeID=challengeId).order_by('?')\n                else:\n                    challenge_questions = ChallengesQuestions.objects.filter(\n                        challengeID=challengeId).order_by(\"questionPosition\")\n                for challenge_question in challenge_questions:\n                    questionObjects.append(challenge_question)\n\n                # getting all the questions of the challenge except the matching question\n\n                qlist = []\n                sessionDict['questions'] = []\n                for i in range(0, len(questionObjects)):\n                    q = questionObjects[i]\n                    qdict = questionTypeFunctions[q.questionID.type]['makeqdict'](\n                        q.questionID, i+1, challengeId, q, None)\n                    qlist.append(qdict)\n                    sessionDict['questions'].append(qdict)\n\n                request.session[attemptId] = sessionDict\n                # As we set this one, we also take a quick moment to clean up old ones if needed.\n                remove_old_challenge_session_entries(request.session)\n                context_dict['attemptId'] = attemptId\n                context_dict['question_range'] = zip(\n                    range(1, len(questionObjects)+1), qlist)\n                print(\"contents of the qlist\", qlist)\n                context_dict['question_ids'] = [i for i in range(1, len(questionObjects)+1)]\n\n                # time pressure\n                if( challenge.isGraded ):\n                    context_dict['timePressure'] = ccp.timePressureSerious\n                else:\n                    context_dict['timePressure'] = ccp.timePressureWarmup\n                stlist = StudentPlayerType.objects.filter(course=currentCourse, student=student)\n\n                if(len(stlist) > 0):\n                    st = stlist.first()\n                    if( challenge.isGraded ):\n                        context_dict['timePressure'] = st.playerType.timePressureSerious\n                    else:\n                        context_dict['timePressure'] = st.playerType.timePressureWarmup\n                register_event(Event.startChallenge, request, None, challengeId)\n                print(\"Registered Event: Start Challenge Event, Student: student in the request, Challenge: \" + challengeId)\n\n                context_dict['ckeditor'] = config_ck_editor()\n\n        dumpUnusedHints(Student.objects.get(user=request.user))\n        return render(request, 'Students/ChallengeSetup.html', context_dict)\n\n# we have to dump out the unused hints:\n# unused hints are defined by hints that do not have a studentChallengeQuestions foreign key attached\n# if we leave them in it causes system instability with garbage data lurking around\n# we must dump any that exist for the user to ensure accurate reporting even if they miss submitting a challenge\n# but they have submitted already some hint requests\n# hints that are used properly have a studentChallengeQuestions foreign key\ndef dumpUnusedHints(student):\n    studentAnswerHints = StudentAnswerHints.objects.filter(studentID=student, studentChallengeQuestionID__isnull=True)\n    if(len(studentAnswerHints) > 0):\n        # dump the hints that didn't get attached to anything\n        for studentAnswerHint in studentAnswerHints:\n            studentAnswerHint.delete()\n\n# Checks if user is a team leader\ndef checkTeamLeader(student, course):\n    return Teams.objects.filter(courseID=course, teamLeader=student).exists()\n","repo_name":"OneUp-Learning/oneUp","sub_path":"Students/views/challengeSetupView.py","file_name":"challengeSetupView.py","file_ext":"py","file_size_in_byte":10631,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"31004385434","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 3 21:41:36 2018\n\n@author: unmesh\n\"\"\"\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib as mpl\nfrom mpl_toolkits.mplot3d import 
Axes3D # This import has side effects required for the kwarg projection='3d' in the call to fig.add_subplot\nimport matplotlib.pyplot as plt\nimport random\nimport sys\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom matplotlib.colors import LogNorm\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport os\n\nos.system('rm data_2d.dat')\nos.system('rm data_1d.dat')\n###################################################################################\ntot=np.int(sys.argv[1])\nfold=np.int(sys.argv[2])\nbeads=np.array(str(sys.argv[3]).split(','),int)\nsec=str(sys.argv[4]).split(',')\ntemp=np.array(str(sys.argv[5]).split(','),float)\ndim=np.int(sys.argv[6])\n###################################################################################\nbin1=bin2=100\nconst=0\ndef files(res2):\n f=open(res2,'r')\n a=f.readlines()\n f.close\n \n coord=[]\n for i in range(0,len(a)):\n x=a[i].split()\n coord.append(x)\n coord=np.array(coord,float)\n return (coord)\n\ndef files2(x,nbonds):\n frames=int(len(x)/nbonds)\n d1=[]\n d2=[]\n for i in range(frames):\n for j in range(int(nbonds/2)):\n t=j+(i)*nbonds\n d1.append(x[t])\n del t\n t=j+int(nbonds/2)+(i)*nbonds\n d2.append(x[t])\n del t\n d1=np.array(d1)\n d2=np.array(d2)\n return (d1,d2)\n\ndef cut(xx0,fol,bead,count):\n dd=np.split(xx0,bead)\n s=[]\n for i in range (len(dd[0])):\n for j in range (bead):\n s.append(dd[j][i])\n s=np.array(s,float)\n del dd\n return (s)\n\ndef minima2(X,Y,ggg,sss,count):\n vv1x=[]\n vv1y=[]\n vv1z=[]\n for iii in range(len(ggg)):\n for jjj in range(len(ggg)):\n\n vv1z.append(ggg[iii][jjj])\n vv1x.append(X[iii][jjj])\n vv1y.append(Y[iii][jjj])\n\n vv1z=np.array(vv1z,float)\n vv1x=np.array(vv1x,float)\n vv1y=np.array(vv1y,float)\n \n ax = fig.add_subplot(2,2,count)\n plt.plot([vv1x[np.argmin(vv1z)]],[vv1y[np.argmin(vv1z)]],'--o', markersize=2, lw=1 ,color='black',mfc='white') \n\ndef minima(X,Y,ggg,sss,count):\n vv1z=[]\n vv2z=[]\n vv3z=[]\n vv4z=[]\n vv1x=[]\n vv2x=[]\n vv3x=[]\n vv4x=[]\n vv1y=[]\n vv2y=[]\n vv3y=[]\n vv4y=[]\n vv1s=[]\n vv2s=[]\n vv3s=[]\n vv4s=[]\n for iii in range(len(ggg)):\n for jjj in range(len(ggg)):\n\n if X[iii][jjj] >= 0.0 and Y[iii][jjj] >= 0.0:\n vv1z.append(ggg[iii][jjj])\n vv1x.append(X[iii][jjj])\n vv1y.append(Y[iii][jjj])\n vv1s.append(sss[iii][jjj])\n elif X[iii][jjj] < 0.0 and Y[iii][jjj] >= 0.0:\n vv2z.append(ggg[iii][jjj])\n vv2x.append(X[iii][jjj])\n vv2y.append(Y[iii][jjj])\n vv2s.append(sss[iii][jjj])\n if X[iii][jjj] < 0.0 and Y[iii][jjj] < 0.0:\n vv3z.append(ggg[iii][jjj])\n vv3x.append(X[iii][jjj])\n vv3y.append(Y[iii][jjj])\n vv3s.append(sss[iii][jjj])\n if X[iii][jjj] >= 0.0 and Y[iii][jjj] < 0.0:\n vv4z.append(ggg[iii][jjj])\n vv4x.append(X[iii][jjj])\n vv4y.append(Y[iii][jjj])\n vv4s.append(sss[iii][jjj])\n\n vv1z=np.array(vv1z,float)\n vv2z=np.array(vv2z,float)\n vv3z=np.array(vv3z,float)\n vv4z=np.array(vv4z,float)\n vv1x=np.array(vv1x,float)\n vv2x=np.array(vv2x,float)\n vv3x=np.array(vv3x,float)\n vv4x=np.array(vv4x,float)\n vv1y=np.array(vv1y,float)\n vv2y=np.array(vv2y,float)\n vv3y=np.array(vv3y,float)\n vv4y=np.array(vv4y,float)\n vv1s=np.array(vv1s,float)\n vv2s=np.array(vv2s,float)\n vv3s=np.array(vv3s,float)\n vv4s=np.array(vv4s,float)\n\n print(count-2)\n print (vv1x[np.argmin(vv1z)],vv1y[np.argmin(vv1z)],np.amin(vv1z)*(temp[i-1]/298)*0.02568*1000,vv1s[np.argmin(vv1z)]*(temp[i-1]/298)*0.02568*1000)\n print 
(vv3x[np.argmin(vv3z)],vv3y[np.argmin(vv3z)],np.amin(vv3z)*(temp[i-1]/298)*0.02568*1000,vv3s[np.argmin(vv3z)]*(temp[i-1]/298)*0.02568*1000)\n\n ax = fig.add_subplot(2,2,count)\n plt.plot([vv1x[np.argmin(vv1z)],vv3x[np.argmin(vv3z)]],[vv1y[np.argmin(vv1z)],vv3y[np.argmin(vv3z)]],'--o', markersize=2, lw=1 ,color='black',mfc='white')\n\n\n# x, y = vv1x[np.argmin(vv1z)], vv1y[np.argmin(vv1z)]\n# text = ' {:.2f}'.format(np.amin(vv1z)*(temp[i-1]/298)*0.02568*1000)\n# ax.annotate(text, xy=(x,y),size=4,weight='bold',color='black')\n\ndef summation(X,Y,ggg):\n vv1z=[]\n vv2z=[]\n vv3z=[]\n vv4z=[]\n for iii in range(len(ggg)):\n for jjj in range(len(ggg)):\n\n if X[iii][jjj] >= 0.0 and Y[iii][jjj] >= 0.0:\n vv1z.append(ggg[iii][jjj])\n elif X[iii][jjj] < 0.0 and Y[iii][jjj] >= 0.0:\n vv2z.append(ggg[iii][jjj])\n if X[iii][jjj] < 0.0 and Y[iii][jjj] < 0.0:\n vv3z.append(ggg[iii][jjj])\n if X[iii][jjj] >= 0.0 and Y[iii][jjj] < 0.0:\n vv4z.append(ggg[iii][jjj])\n\n vv1z=np.array(vv1z,float)\n vv2z=np.array(vv2z,float)\n vv3z=np.array(vv3z,float)\n vv4z=np.array(vv4z,float)\n\n print ('summation')\n print (np.sum(vv1z),np.sum(vv2z),np.sum(vv3z),np.sum(vv4z))\n\n\n##################################################################\n#############1.histogram########################################## \nnew_colour = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f','#bcbd22', '#17becf']\nsec2 = ['(a)','(b)','(c)','(d)','(e)','(f)']\n\nfig = plt.figure()\n\nfor i in range(1,tot+1):\n res2=\"del{}\".format(i)\n coord=files(res2)\n\n x=coord[:,1]-coord[:,2]\n xx0,yy0=files2(x,dim)\n\n fold=1\n x0=np.split(xx0,fold)\n y0=np.split(yy0,fold)\n\n \n ax = fig.add_subplot(2,2,i)\n xedges,yedges=np.histogram2d(x0[0], y0[0], bins=(bin1,bin2),range=[[-2.0,2.0], [-2.0,2.0]], normed=True)[1:]\n xcenters = (xedges[:-1] + xedges[1:]) / 2\n ycenters = (yedges[:-1] + yedges[1:]) / 2\n X, Y = np.meshgrid(xcenters, ycenters)\n dx=(xedges[1]-xedges[0])\n dy=(yedges[1]-yedges[0])\n\n kkk=np.zeros((fold,bin1,bin2),float)\n for j in range(fold):\n gg=np.histogram2d(x0[j], y0[j], bins=(bin1,bin2),range=[[-2.0,2.0], [-2.0,2.0]], normed=True)[0]\n gg=gg.T\n gg [gg == 0] = 10**(-100)\n # gg=-1*(temp[i-1]/298)*0.02568*1000*np.log(gg*dx*dy) #meV\n gg=-1*np.log(gg*dx*dy) #KbT\n gg[gg > 100]=np.inf\n kkk[j]=gg\n\n ggg=np.mean(kkk,axis=0)\n sss=np.std(kkk,axis=0)\n\n summation(X,Y,np.exp(-1*ggg))\n\n v=np.amin(ggg)\n ggg=ggg-v\n\n#######################################################################################################################\n e1=[]\n e2=[]\n for kk in range(bin1):\n e1.append(X[kk][kk])\n e2.append(ggg[kk][kk])\n\n e1=np.array(e1)\n e2=np.array(e2)\n\n for ij in range(bin1):\n f=open('data_1d.dat','a')\n f.write(\"%.3f \\t %.3f \\n\" %(e2[ij],e1[ij]))\n f.close()\n f=open('data_1d.dat','a')\n f.write(\"%.s \\n\" %('&'))\n f.close()\n\n\n for ij in range(bin1):\n for ik in range(bin2):\n f=open('data_2d.dat','a')\n f.write(\"%.3f \\t %.3f \\t %.3f \\t %.3f \\n\" %(ggg[ij][ik],sss[ij][ik],X[ij][ik],Y[ij][ik]))\n f.close()\n f=open('data_2d.dat','a')\n f.write(\"%.s \\n\" %('&'))\n f.close()\n\n\n if i > 1:\n minima(X,Y,ggg,sss,i)\n else:\n minima2(X,Y,ggg,sss,i)\n\n plt.xticks(np.arange(-1.5,1.6,0.5),size=10,rotation=45)\n plt.yticks(np.arange(-1.5,1.6,0.5),size=10)\n plt.xlabel(r'$\\delta_1$ (Å)',size=12)\n if i > 0:\n plt.ylabel(r'$\\delta_2$ (Å)',size=12)\n plt.xlim(-1.4,1.4)\n plt.ylim(-1.4,1.4)\n plt.axhline(y=0.0, color = 'k',linewidth=1, linestyle = '--')\n 
plt.axvline(x=0.0, color = 'k',linewidth=1, linestyle = '--')\n\n plt.contourf(X, Y, ggg, [0,1,2,3,4,5,6,7,8,9,10],cmap=plt.cm.get_cmap('jet'))\n CS=ax.contour(X, Y, ggg, [0,1,2,3,4,5,6,7,8,9,10],cmap=plt.cm.get_cmap('jet'))\n\n cbar = plt.colorbar(ticks=None,shrink=1.0,pad=0.0 )\n cbar.ax.tick_params(labelsize=10)\n if i == 2:\n cbar.set_label(r'Free Energy ($k_BT$) ', size=10)\n\n\n\n\n del X,Y,xcenters,ycenters,xedges,yedges,x,x0,y0\n\n####################################################################################################################################\nfor i in range(1,tot+1):\n\n res2=\"ddel{}\".format(i)\n coord=files(res2)\n\n x=coord[:,1]-coord[:,2]\n xx0,yy0=files2(x,dim)\n\n fold=1\n xxx0=cut(xx0,fold,beads[i-1],i)\n yyy0=cut(yy0,fold,beads[i-1],i)\n\n x0=np.split(xxx0,fold)\n y0=np.split(yyy0,fold)\n\n\n ax = fig.add_subplot(2,2,i+2)\n xedges,yedges=np.histogram2d(x0[0], y0[0], bins=(bin1,bin2),range=[[-2.0,2.0], [-2.0,2.0]], normed=True)[1:]\n xcenters = (xedges[:-1] + xedges[1:]) / 2\n ycenters = (yedges[:-1] + yedges[1:]) / 2\n X, Y = np.meshgrid(xcenters, ycenters)\n dx=(xedges[1]-xedges[0])\n dy=(yedges[1]-yedges[0])\n\n kkk=np.zeros((fold,bin1,bin2),float)\n for j in range(fold):\n gg=np.histogram2d(x0[j], y0[j], bins=(bin1,bin2),range=[[-2.0,2.0], [-2.0,2.0]], normed=True)[0]\n gg=gg.T\n gg [gg == 0] = 10**(-100)\n # gg=-1*(temp[i-1]/298)*0.02568*1000*np.log(gg*dx*dy) #meV\n gg=-1*np.log(gg*dx*dy) #KbT\n gg[gg > 100]=np.inf\n kkk[j]=gg\n\n ggg=np.mean(kkk,axis=0)\n sss=np.std(kkk,axis=0)\n \n summation(X,Y,np.exp(-1*ggg))\n\n v=np.amin(ggg)\n ggg=ggg-v\n\n#######################################################################################################################\n e1=[]\n e2=[]\n for kk in range(bin1):\n e1.append(X[kk][kk])\n e2.append(ggg[kk][kk])\n\n e1=np.array(e1)\n e2=np.array(e2)\n\n for ij in range(bin1):\n f=open('data_1d.dat','a')\n f.write(\"%.3f \\t %.3f \\n\" %(e2[ij],e1[ij]))\n f.close()\n f=open('data_1d.dat','a')\n f.write(\"%.s \\n\" %('&'))\n f.close()\n\n\n for ij in range(bin1):\n for ik in range(bin2):\n f=open('data_2d.dat','a')\n f.write(\"%.3f \\t %.3f \\t %.3f \\t %.3f \\n\" %(ggg[ij][ik],sss[ij][ik],X[ij][ik],Y[ij][ik]))\n f.close()\n f=open('data_2d.dat','a')\n f.write(\"%.s \\n\" %('&'))\n f.close()\n\n minima(X,Y,ggg,sss,i+2)\n legend=sec[i-1] \n\n plt.xticks(np.arange(-1.5,1.6,0.5),size=10,rotation=45)\n plt.yticks(np.arange(-1.5,1.6,0.5),size=10)\n plt.xlabel(r'$\\delta_1$ (Å)',size=12)\n if i > 0:\n plt.ylabel(r'$\\delta_2$ (Å)',size=12)\n plt.xlim(-1.4,1.4)\n plt.ylim(-1.4,1.4)\n plt.axhline(y=0.0, color = 'k',linewidth=1.0, linestyle = '--')\n plt.axvline(x=0.0, color = 'k',linewidth=1.0, linestyle = '--')\n\n plt.contourf(X, Y, ggg, [0,1,2,3,4,5,6,7,8,9,10],cmap=plt.cm.get_cmap('jet'))\n CS=ax.contour(X, Y, ggg, [0,1,2,3,4,5,6,7,8,9,10],cmap=plt.cm.get_cmap('jet'))\n# ax.clabel(CS, inline=True, fontsize=10)\n cbar = plt.colorbar(ticks=None,shrink=1.0,pad=0.0 )\n cbar.ax.tick_params(labelsize=10)\n if i == 2:\n cbar.set_label(r'Free Energy ($k_BT$) ', size=10)\n\n\n\n\n del X,Y,xcenters,ycenters,xedges,yedges,x,x0,y0\n\n\n\n\n\n####################################################################################################################################\n\nplt.subplots_adjust(wspace=0.5,hspace=0.7)\nfig.savefig('fig_conv_1_1.pdf')\nplt.savefig(\"fig_conv_1_1.jpg\", dpi=300, bbox_inches = 'tight', pad_inches = 
0.1)\n#plt.show()\n\n","repo_name":"unmesh1993/PhD_IISER","sub_path":"PhD_projects/TPA/ANALYSIS_PLOTTING/JPDF_H_H/start_temp_1_avg.py","file_name":"start_temp_1_avg.py","file_ext":"py","file_size_in_byte":11721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39110280711","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom django.conf import settings\n\nfrom concurrency.models import ConcurrentModel\n\n\nclass Resource(ConcurrentModel):\n title = models.CharField(\n blank=False,\n max_length=140,\n )\n description = RichTextUploadingField(\n blank=True,\n max_length=5000\n )\n link = models.CharField(\n unique=True,\n null=True, # Must have null=True else unique=True will throw errors at blank links\n blank=True,\n max_length=500,\n )\n tags = models.ManyToManyField(\n 'resource.Tag',\n related_name='resources',\n blank=True\n )\n medium = models.CharField(\n max_length=150,\n blank=False,\n )\n grade = models.CharField(\n max_length=150,\n blank=False,\n )\n content_creator = models.CharField(\n max_length=150,\n blank=False,\n )\n creation_date = models.DateTimeField(\n default=timezone.now\n )\n added_by_user = models.ForeignKey(\n User,\n null=True,\n on_delete=models.DO_NOTHING,\n related_name='created_resources'\n )\n starred_by = models.ManyToManyField(\n User,\n blank=True,\n related_name='starred_resources',\n )\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse(\"resource_detail\", kwargs={'pk': self.pk})\n\n class Meta:\n ordering = ('-creation_date', )\n\n\nclass Tag(models.Model):\n name = models.CharField(\n blank=False,\n unique=True,\n max_length=50,\n )\n\n def __str__(self):\n return self.name\n","repo_name":"CogitoNTNU/web","sub_path":"resource/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"10764330384","text":"# Stock Picker Script\n\nimport yfinance as yf\n\ndef pick_stocks(tickers, num_stocks):\n selected_stocks = []\n \n for ticker in tickers:\n # Fetch stock data using yfinance\n data = yf.download(ticker, period='1y')\n \n if data.empty:\n print(f\"No data available for {ticker}. 
Skipping...\")\n continue\n \n # Calculate one-year return\n start_price = data['Close'].iloc[0]\n end_price = data['Close'].iloc[-1]\n one_year_return = (end_price - start_price) / start_price\n \n # Add stock to selected stocks if positive return\n if one_year_return > 0:\n selected_stocks.append((ticker, one_year_return))\n \n # Sort stocks by return and select top N\n selected_stocks.sort(key=lambda x: x[1], reverse=True)\n top_stocks = selected_stocks[:num_stocks]\n \n return top_stocks\n\n# Example usage\ntickers = ['AAPL', 'GOOGL', 'AMZN', 'MSFT', 'META'] # List of stock tickers\nnum_stocks = 5 # Number of stocks to pick\n\ntop_picks = pick_stocks(tickers, num_stocks)\nfor stock in top_picks:\n ticker, return_pct = stock\n print(f\"{ticker}: {return_pct * 100:.2f}% return in the past year\")","repo_name":"Sarthak-Katyal/Stock-Screener","sub_path":"Stock Picker Script.py","file_name":"Stock Picker Script.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37248254635","text":"class Solution:\n\n def removeNthFromEnd(self, head: Optional[ListNode],\n n: int) -> Optional[ListNode]:\n result = ListNode(0, head)\n # Thst's where left starts...It's not nessesary to be 0\n # And the neaxt is head\n # Definition for singly-linked list.\n # class ListNode:\n # def __init__(self, val=0, next=None):\n # self.val = val\n # self.next = next\n left = result\n right = head\n\n while n > 0 and right:\n right = right.next\n n -= 1\n\n# Move the right pointer to the \"right\" place\n while right:\n left = left.next\n right = right.next\n\n left.next = left.next.next\n return result.next\n\n\n# 3/27 我寫的,待改(下有正確版)\n# if not head:return head\n# length = 1\n# cur = head\n# while cur:\n# cur = cur.next\n# length += 1\n\n# if length == 1 and n >=1:\n# return None\n\n# n = n % length\n\n# if n == 0:\n# return head\n\n# cur = head\n# for i in range(length - n - 2):\n# cur = cur.next\n# if cur and cur.next:\n# cur.next = cur.next.next\n\n# return head\n\n\n# 正確版\nclass Solution:\n\n def removeNthFromEnd(self, head: Optional[ListNode],\n n: int) -> Optional[ListNode]:\n sz = 0\n node = head\n\n while node:\n node = node.next\n sz += 1\n\n # it's guaranteed that 1 <= n <= sz\n if n == sz:\n return head.next\n else: # 2 <= n <= sz -1\n\n node = head\n prev, curr = node, node.next\n for _ in range(sz - n - 1):\n prev, curr = curr, curr.next\n prev.next = prev.next.next\n return head\n","repo_name":"YiruDing/LeetcodePractice","sub_path":"Neetcode305/Linked_List/19_Remove_Nth_Node_From_End_of_List(Udemy中文亦有).py","file_name":"19_Remove_Nth_Node_From_End_of_List(Udemy中文亦有).py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16037063953","text":"import copy\nfrom DangDang_Scrapy.dbhelper import DBHelper\n\n\nclass DangdangScrapyPipeline(object):\n\n # 连接数据库\n def __init__(self):\n self.db = DBHelper()\n\n # 对 item 字段进行处理\n def process_item(self, item, spider):\n item['rank'] = item['rank'][:item['rank'].index('.')]\n item['comment_num'] = item['comment_num'][:item['comment_num'].index('条')]\n item['price_n'] = item['price_n'][item['price_n'].index('¥')+1:]\n item['price_s'] = item['price_s'][:item['price_s'].index('折')]\n item['discount'] = item['discount'][:item['discount'].index('折')]\n asynItem = copy.deepcopy(item)\n self.db.dbpool.runInteraction(self.db.insert_to_db, asynItem)\n return 
item\n\n\n\n","repo_name":"zoukun120/DangDang_Scrapy","sub_path":"DangDang_Scrapy/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16356627964","text":"# 완주 여행 키워드 분석 워드클라우드 \n#Step 1. 필요한 모듈을 실행합니다.\nfrom konlpy.tag import * #pip install konlpy 먼저 하세요\nimport matplotlib.pyplot as plt #pip install matplotlib 먼저 하세요\nfrom matplotlib import font_manager , rc\nfrom wordcloud import WordCloud # pip install wordcloud 먼저 하세요\nfrom collections import Counter\nimport nltk #자연어 처리를 위한 파이썬 패키지\nokt = Okt()\nkkma = Kkma( )\n\n \n#Step 2 . 텍스트 파일을 불러와서 형태소 분석을 합니다.\ndata1 = open(\"c:\\\\data\\\\완주여행_2017.txt\").read( )\ndata1\n\nprint(data1)\nprint(\"\\n\")\n\n#Step 3. 키워드를 추출합니다\ndata2 = okt.nouns(data1)\n#data2 = kkma.nouns(data1)\nprint(\"1.추출된 키워드:\", data2)\nprint(len(data2))\n\n# Step 4. 용어 정리 작업하기\ndata3=[]\nfor a in data2 :\n if a == \"와일드\" or a==\"축제\":\n data3.append(a.replace(\"와일드\",\"와일드푸드축제\"))\n elif a==\"로컬\" :\n data3.append(a.replace(\"로컬\",\"로컬푸드\"))\n elif a==\"메뚜기\" :\n data3.append(a.replace(\"메뚜기\",\"메뚜기구이\"))\n elif a==\"푸드\" :\n data3.append(a.replace(\"푸드\",\" \"))\n else :\n data3.append(a) \nprint(data3)\n\n#Step 5. 추출된 단어들의 빈도를 조사한 후 많이 언급된 100개만 출력합니다\nprint(\"\\n\")\ndata4 = Counter(data3)\ndata5 = data4.most_common(50)\n\nprint(\"2.단어별 빈도수:\",data5)\n\n#Step 6. 불용어 제거하기 :분석을 하는 것에 있어서는 큰 도움이 되지 않는 단어 제거\nsword = open(\"c:\\\\data\\\\와푸gsub.txt\").read()\n#print(sword)\ndata6 = [ each_word for each_word in data3\n if each_word not in sword ]\nprint(data6)\n\n#Step 7. 글자수로 불용어 제거하기\ndata7 = []\nfor i in data6 :\n if len(i) >= 2 and len(i) <= 10 :\n data7.append(i) \nprint(data7)\n\n# Step 8. 단어별 빈도수 집계하기\ndata8 = Counter(data7)\ndata9 = data8.most_common(50)\nprint(data9)\ndata10 = dict(data9)\n\n#Step 9. 
워드 클라우드 그리기\nwordcloud = WordCloud(font_path=\"C:\\Windows\\Fonts\\H2GTRE.TTF\",\n                      relative_scaling=0.4,\n                      background_color=\"white\"\n                      ).generate_from_frequencies(data10)\nplt.figure(figsize=(10,10))\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.show()","repo_name":"SEONGJAE-YOO/python-project.","sub_path":"wanchinpa/wordcloud2.py","file_name":"wordcloud2.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"1594249429","text":"import os\nimport sys\nimport time\nimport json\nfrom json import load\nimport sys\nimport logging\nimport subprocess\nfrom ping3 import ping, verbose_ping\nifaces = ['enp0s3','enp0s8','enp0s9','enp0s10']\ndef ping_main():\n    if \"enp\" in a[\"fo\"][\"main_iface\"]:\n        ping_main1 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"main_iface\"],timeout = int(threshold))\n        ping_main2 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"main_iface\"],timeout = int(threshold))\n        ping_main3 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"main_iface\"],timeout = int(threshold))\n        ping_main4 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"main_iface\"],timeout = int(threshold))\n        # ping() returns a float latency on success and a falsy value on failure\n        if any(isinstance(p, float) for p in (ping_main1, ping_main2, ping_main3, ping_main4)):\n            ping_main = 1\n        else:\n            ping_main = None\n    else:\n        ping_main = None\n    return ping_main\n\ndef ping_sec():\n    if \"enp\" in a[\"fo\"][\"sec_iface\"]:\n        # ping_sec = 1\n        ping_sec1 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"sec_iface\"],timeout = int(threshold))\n        ping_sec2 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"sec_iface\"],timeout = int(threshold))\n        ping_sec3 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"sec_iface\"],timeout = int(threshold))\n        ping_sec4 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"sec_iface\"],timeout = int(threshold))\n        if any(isinstance(p, float) for p in (ping_sec1, ping_sec2, ping_sec3, ping_sec4)):\n            ping_sec = 1\n        else:\n            ping_sec = None\n    else:\n        ping_sec = None\n    return ping_sec\n\ndef ping_backup():\n    if \"enp\" in a[\"fo\"][\"backup_iface\"]:\n        # ping_backup = 1\n        # probe the backup interface\n        ping_backup1 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"backup_iface\"],timeout = int(threshold))\n        ping_backup2 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"backup_iface\"],timeout = int(threshold))\n        ping_backup3 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"backup_iface\"],timeout = int(threshold))\n        ping_backup4 = ping(\"8.8.8.8\", interface = a[\"fo\"][\"backup_iface\"],timeout = int(threshold))\n        if any(isinstance(p, float) for p in (ping_backup1, ping_backup2, ping_backup3, ping_backup4)):\n            ping_backup = 1\n        else:\n            ping_backup = None\n    else:\n        ping_backup = None\n    return ping_backup\n\nif __name__ == '__main__':\n\n    while True:\n        info_json = {\"main_iface\":{\"status\":'Idle'},\"sec_iface\":{\"status\":'Idle'},\"backup_iface\":{\"status\":'Idle'}}\n        current_config_jsonFile = open('/home/pp/linux_bridge_PR/5Gjump/current_config.json','r')\n        a = json.load(current_config_jsonFile)\n\n        no_use = []\n        for i in ifaces:\n            if i != a[\"fo\"][\"main_iface\"] and i != a[\"fo\"][\"sec_iface\"] and i != a[\"fo\"][\"backup_iface\"]:\n                no_use.append(i)\n        threshold = a[\"fo\"]['latency'][\"threshold\"]\n        detection_period = a[\"fo\"]['latency'][\"detection_period\"]\n\n        ping_main1 = ping_main()\n        ping_sec1 = ping_sec()\n        ping_backup1 = ping_backup()\n        print(\"ping_main\")\n        print(ping_main1)\n        print(\"ping_sec\")\n        print(ping_sec1)\n        print(\"ping_backup1\")\n        print(ping_backup1)\n\n\n        
time.sleep(int(detection_period))\n","repo_name":"P0oxox/linux_bridge_PR","sub_path":"5Gjump/try01.py","file_name":"try01.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31174942195","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n***************************************************************************\r\n* Copyright (c) 2022 *\r\n* Shai Seger <shaise[at]gmail> *\r\n* *\r\n* This file is a supplement to the FreeCAD CAx development system. *\r\n* *\r\n* This program is free software; you can redistribute it and/or modify *\r\n* it under the terms of the GNU Lesser General Public License (LGPL) *\r\n* as published by the Free Software Foundation; either version 2 of *\r\n* the License, or (at your option) any later version. *\r\n* for detail see the LICENCE text file. *\r\n* *\r\n* This software is distributed in the hope that it will be useful, *\r\n* but WITHOUT ANY WARRANTY; without even the implied warranty of *\r\n* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *\r\n* GNU Library General Public License for more details. *\r\n* *\r\n* You should have received a copy of the GNU Library General Public *\r\n* License along with this macro; if not, write to the Free Software *\r\n* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *\r\n* USA *\r\n* *\r\n***************************************************************************\r\n\"\"\"\r\nfrom screw_maker import *\r\nimport FastenerBase\r\n\r\n\r\ndef makeSquareNut(self, fa):\r\n \"\"\"Creates a nut with 4 wrenching flats, that may optionally have a\r\n chamfer on its top face.\r\n Supported types:\r\n - DIN 557 square nuts\r\n - DIN 562 square thin nuts\r\n - ASME B18.2.2 square nuts\r\n - ASME B18.2.2 square machine screw nuts (small sizes)\r\n \"\"\"\r\n SType = fa.type\r\n dia = self.getDia(fa.calc_diam, True)\r\n if SType == 'DIN557':\r\n s, m, di, dw, P = fa.dimTable\r\n top_chamfer = True\r\n elif SType == 'DIN562':\r\n s, m, di, P = fa.dimTable\r\n top_chamfer = False\r\n elif SType == \"ASMEB18.2.2.1B\":\r\n P, _, _, m, s = fa.dimTable\r\n top_chamfer = False\r\n elif SType == \"ASMEB18.2.2.2\":\r\n TPI, F, H = fa.dimTable\r\n P = 1 / TPI * 25.4\r\n s = F * 25.4\r\n dw = s\r\n m = H * 25.4\r\n top_chamfer = True\r\n # create the nut body using a recantular prism primitive\r\n nut = Part.makeBox(s, s, m, Base.Vector(-s / 2, -s / 2, 0.0))\r\n # subtract the internal bore from the nut using a revolved solid\r\n do = dia * 1.1\r\n inner_rad = dia / 2 - P * 0.625 * sqrt3 / 2\r\n inner_cham_ht = math.tan(math.radians(15)) * (do / 2 - inner_rad)\r\n fm = FastenerBase.FSFaceMaker()\r\n fm.AddPoint(0.0, 0.0)\r\n fm.AddPoint(do / 2, 0.0)\r\n fm.AddPoint(inner_rad, inner_cham_ht)\r\n fm.AddPoint(inner_rad, m - inner_cham_ht)\r\n fm.AddPoint(do / 2, m)\r\n fm.AddPoint(0.0, m)\r\n hole = self.RevolveZ(fm.GetFace())\r\n nut = nut.cut(hole)\r\n # add a chamfer on one side of the outer corners if needed\r\n if top_chamfer:\r\n cham_solid = Part.makeCone(dw / 2 + m * math.sqrt(3), dw / 2, m)\r\n nut = nut.common(cham_solid)\r\n # cut modeled threads if needed\r\n if fa.thread:\r\n thread_cutter = self.CreateInnerThreadCutter(dia, P, m + P)\r\n nut = nut.cut(thread_cutter)\r\n return 
nut\r\n","repo_name":"shaise/FreeCAD_FastenersWB","sub_path":"FsFunctions/FSmakeSquareNut.py","file_name":"FSmakeSquareNut.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","stars":231,"dataset":"github-code","pt":"81"} +{"seq_id":"15455636588","text":"class Solution:\n def isMatch2(self, s: str, p: str) -> bool:\n last_star_index = 0\n for i in range(len(p)):\n if p[i] != '*':\n break\n else:\n last_star_index = i \n \n \n if last_star_index > 0 :\n p = p[last_star_index:]\n if s == '' and (p == '' or p == '*'):\n return True\n \n if len(p) == 0 or (len(s) == 0 and p != '*'):\n return False\n\n\n if s[0] == p[0] or p[0] == '?':\n return self.isMatch2(s[1:], p[1:])\n elif p[0] == '*':\n for i in range(len(s) + 1):\n if self.isMatch2(s[i:], p[1:]):\n return True\n \n return False\n else:\n return False\n\n def isMatch(self, s: str, p: str) -> bool:\n len_p = len(p)\n len_s = len(s)\n\n dp = [[0] * (len_p+1) for _ in range(len_s+1)]\n\n dp[0][0] = 1\n\n for x in range(1, len_p+1):\n if p[x-1] == '*':\n dp[0][x] = dp[0][x-1]\n print(dp)\n for i in range(1, len_s+1):\n for j in range(1, len_p+1):\n if s[i-1] == p[j-1] or p[j-1] == '?':\n dp[i][j] = dp[i-1][j-1]\n elif p[j-1] == '*':\n dp[i][j] = dp[i][j-1] or dp[i-1][j]\n print(dp)\n\n return bool(dp[-1][-1])\n\nif __name__ == \"__main__\":\n sol = Solution()\n # print(sol.isMatch(\"aa\", \"*\"))\n print(sol.isMatch( \"abceb\", \"*a*b\"))\n # print(sol.isMatch(\"babbbbaabababaabbababaababaabbaabababbaaababbababaaaaaabbabaaaabababbabbababbbaaaababbbabbbbbbbbbbaabbb\", \"b**bb**a**bba*b**a*bbb**aba***babbb*aa****aabb*bbb***a\") )\n #print(sol.isMatch(\"bbaabbbbaaaabaabbbbabababaabaaaabaaabbaabaabaaabbabaabbbbbbbbbbaababbabaabbabaababbaaaabbbbaaaaaababbbbabbaababbabbabbababbbbabbbbaabaaabbaababbbaaaaababbbabbaaaaababbbaabbaabbbbbbbbbaababaababbababbabaa\", \"*b****abb***bbba**b*baaa****ba*ab***a*ab**a*a***aabbabb*bb**b***bbbbab****b*ba*baa*b*aa*b*b***a*bbab*\" ))","repo_name":"sqlxx/algo-python","sub_path":"wildcard_matching.py","file_name":"wildcard_matching.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37744174969","text":"#!/bin/env python3\n\nimport os\nimport json\n\n\nprint('生成配置文件...', end='')\n\nROOT_DIR = os.path.dirname(__file__)\n\nwith open(ROOT_DIR + '/conf/vmess.json', encoding='utf-8') as f:\n vmess_conf = []\n for l in f: vmess_conf.append(l.rsplit('//', 1)[0].strip())\nvmess_conf = json.loads('\\n'.join(vmess_conf))\n\nwith open(ROOT_DIR + '/res/v2ray.json', encoding='utf-8') as f:\n v2ray_conf = []\n for l in f: v2ray_conf.append(l.rsplit('//', 1)[0].strip())\nv2ray_conf = json.loads('\\n'.join(v2ray_conf))\nv2ray_conf['outbounds'] = [vmess_conf]\n\nv2ray_conf = json.dumps(v2ray_conf, indent=2)\nwith open(ROOT_DIR + '/conf/v2ray.json', 'w') as f:\n f.write(v2ray_conf)\n\nprint('ok')\n\nprint('重启容器内v2ray进程...\\n')\nos.system(\"docker exec v2ray-l2tp sh /root/restart-v2ray.sh\")\nprint('\\nv2ray配置已更新')","repo_name":"dorofeix/V2L2TP","sub_path":"use_vmess.py","file_name":"use_vmess.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13043188012","text":"def pega_periodo():\r\n '''\r\n Função que obtém o período de monitoramento das temperaturas \r\n retorno: periodo -> lista contendo o número de dias(colunas) e de semanas(linhas) obtidos pelo usuário\r\n '''\r\n dias = 
int(input('Dias por semana: '))\r\n semanas = int(input('Semanas: '))\r\n periodo = [dias, semanas]\r\n return periodo\r\n\r\n\r\ndef cria_matriz(periodo):\r\n '''\r\n (int, int) -> temperaturas : matriz \r\n Recebe o número de linhas e colunas e cria uma matriz com essas dimensões, \r\n preenhce com as temperaturas médias no período e retorna a matriz\r\n '''\r\n temperaturas = []\r\n for i in range(periodo[1]):\r\n semanas = []\r\n for j in range(periodo[0]):\r\n print(f'{i + 1}ª semana, {j + 1}º dia')\r\n semanas.append(float(input(f'Temperatura: ')))\r\n temperaturas.append(semanas)\r\n return temperaturas\r\n\r\n\r\ndef pega_menor_maior(temperaturas):\r\n '''\r\n Função que pega a menor e a maior temperatura em uma matriz de temperaturas\r\n parâmetro: temperaturas -> Matriz contendo temperaturas\r\n retorno: menor_maior -> Lista contendo a menor e a maior temperatura respectivamente\r\n '''\r\n menor = temperaturas[0][0]\r\n maior = temperaturas[0][0]\r\n for semana in temperaturas:\r\n for temp in semana:\r\n if temp > maior:\r\n maior = temp\r\n if temp < menor:\r\n menor = temp\r\n menor_maior = [menor, maior]\r\n return menor_maior\r\n\r\ndef separa_negativas(temperaturas):\r\n '''\r\n Função que cria um array com os valores negativos de uma matriz\r\n parâmetro: temperaturas -> Matriz contendo temperaturas\r\n retorno: negativas -> Array contendo os valores negativos da matriz\r\n '''\r\n negativas = []\r\n for semana in temperaturas:\r\n for temp in semana:\r\n if temp < 0:\r\n negativas.append(temp)\r\n return negativas\r\n\r\ndef soma(temperaturas):\r\n '''\r\n Função que calcula a soma das temperaturas em uma matriz\r\n parâmetro: temperaturas -> Matriz contendo temperaturas\r\n retorno: soma -> soma das temperaturas da matriz\r\n '''\r\n soma = 0\r\n for semana in temperaturas:\r\n for temp in semana:\r\n soma += temp\r\n return soma\r\n\r\ndef media_temp(temperaturas):\r\n '''\r\n Função que calcula a média das temperaturas em uma matriz\r\n parâmetro: temperaturas -> Matriz contendo temperaturas\r\n retorno: media -> média das temperaturas da matriz\r\n '''\r\n # quantidade de semanas * quantidade de dias na semana(temperaturas)\r\n media = soma(temperaturas) / (len(temperaturas) * len(temperaturas[0]))\r\n return media\r\n\r\ndef imprimir_dados(temperaturas, menor_maior, negativas, media):\r\n '''\r\n Função que imprime os dados obtidos\r\n parâmetro: temperaturas -> Matriz contendo temperaturas\r\n parâmetro: menor_maior -> Lista contendo a menor e a maior temperatura respectivamente\r\n parâmetro: negativas -> Array contendo os valores negativos da matriz\r\n parâmetro: media -> média das temperaturas da matriz\r\n '''\r\n print(f'Temperaturas: {temperaturas}\\n')\r\n print(f'Menor temperatura: {menor_maior[0]}°C | Maior temperatura: {menor_maior[1]}°C\\n')\r\n print(f'Temperaturas negativas: {negativas}\\n')\r\n print(f'Média das temperaturas: {media}°C')\r\n\r\ndef main():\r\n periodo = pega_periodo()\r\n temperaturas = cria_matriz(periodo)\r\n menor_maior = pega_menor_maior(temperaturas)\r\n negativas = separa_negativas(temperaturas)\r\n media = media_temp(temperaturas)\r\n imprimir_dados(temperaturas, menor_maior, negativas, media)\r\n\r\n# Chamada da função principal\r\nmain()","repo_name":"RafaRamosCosta/Exercicio-Matriz-Python","sub_path":"temperaturas.py","file_name":"temperaturas.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"27201018478","text":"numbers = list(range(2,11,2))\nprint(numbers)\n\nsquares = []\nfor value in range(1,11):\n square = value ** 2\n squares.append(square)\nprint(squares)\n\n# more concisely\n\nsquares = []\nfor value in range(1,11):\n squares.append(value**2)\nprint(squares)\nprint(min(squares))\nprint(max(squares))\nprint(sum(squares))\n\ndays = [value**2 for value in range(1,11)]\nprint(days)\n\n\nfor numbers in range(1,21):\n print(numbers)\n\nmillions = []\nfor digits in range(1,1000001):\n millions.append(digits)\nprint(min(millions))\nprint(max(millions))\nprint(sum(millions))\n\nodd = []\nfor number in range(0,21,3):\n odd.append((number))\nprint(odd)\n\nthrees = []\nfor three in range(3,31,3):\n threes.append(three)\nprint(threes)\n\n# using expressions\ncubes= []\nfor cube in range(1,11):\n value = cube**3\n cubes.append(value)\nprint(cubes)\n\n\n# list comprehension\nnew_cube = [cube**3 for cube in range(1,11)]\nprint(new_cube[0:11:5])\n\n\n","repo_name":"brianmina/python_course","sub_path":"chapter_4_lists/numerical_list.py","file_name":"numerical_list.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3598743363","text":"import os\nimport re\n\ndef create_html_page():\n # Read the HTML template file\n with open(\"automate-blog/template.html\", \"r\", encoding=\"utf-8\") as file:\n template = file.read()\n\n # Get user input\n writer_name = input(\"Enter the writer's name: \")\n date = input(\"Enter the date: \")\n tags = []\n for i in range(4):\n tag = input(f\"Enter tag {i+1}: \")\n tags.append(tag)\n\n num_paragraphs = int(input(\"Enter the number of paragraphs: \"))\n title = input(\"Enter the blog title: \")\n introduction = input(\"Enter the introduction: \")\n\n index_items = []\n paragraphs = []\n for i in range(num_paragraphs):\n paragraph_title = input(f\"Enter title for paragraph {i + 1}: \")\n\n # Get the image filename\n image_filename = input(f\"Enter the image filename for paragraph {i + 1}: \")\n\n # Construct the image src URL\n image_src = f'https://divine-education.com/img/blog-post/{image_filename}'\n\n # Construct the image div code\n image_div = f'<div class=\"col-6\">\\n\\t<img class=\"img-fluid\" src=\"{image_src}\" alt=\"{paragraph_title}\">\\n</div>'\n\n paragraph_input = input(f\"Enter paragraph {i + 1}: \")\n paragraph_lines = paragraph_input.splitlines()\n paragraph = \"\"\n for line in paragraph_lines:\n paragraph += f\"<p>{line}</p>\\n\"\n index_items.append(f\"<li>{paragraph_title}</li>\")\n paragraphs.append(f\"{image_div}\\n<h2>{paragraph_title}</h2>\\n{paragraph}\")\n\n conclusion = input(\"Enter the conclusion: \")\n\n # Replace placeholders in the template with user input\n template = template.replace(\"<!--writer-->\", writer_name)\n template = template.replace(\"<!--date-->\", date)\n template = template.replace(\"<!--tag1-->\", tags[0])\n template = template.replace(\"<!--tag2-->\", tags[1])\n template = template.replace(\"<!--tag3-->\", tags[2])\n template = template.replace(\"<!--tag4-->\", tags[3])\n template = template.replace(\"<!--title-->\", title)\n template = template.replace(\"<!--intro-->\", introduction)\n template = template.replace(\"<!--index-->\", \"<ul>\" + \"\\n\".join(index_items) + \"</ul>\")\n template = template.replace(\"<!--paragraphs-->\", \"\\n\".join(paragraphs))\n template = template.replace(\"<!--conclusion-->\", conclusion)\n\n # Create a clean directory name from the blog title\n clean_title 
= re.sub(r\"[^\\w\\s-]\", \"\", title)\n clean_title = re.sub(r\"^\\s+|\\s+$\", \"\", clean_title)\n clean_title = re.sub(r\"\\s+\", \"_\", clean_title)\n\n # Save the final HTML page inside the 'blog' directory\n output_file_path = os.path.join(\"blog\", f\"{clean_title}.html\")\n with open(output_file_path, \"w\", encoding=\"utf-8\") as file:\n file.write(template)\n\n print(f\"HTML page created successfully! Saved as {output_file_path}\")\n\n# Call the function to create the HTML page\ncreate_html_page()\n","repo_name":"itzCodeItIshant/DivineAcademy","sub_path":"automate-blog/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70089916104","text":"dct = {((1, 4), \"mic\"), ((4, 8), \"mediu\"), ((8, 15), \"mare\")}\ndct = dict(dct)\nlst = [\"bau-bau\", \"bobocel\", \"14 pisici\", \"1pitic\", \"pisicel\", \"botosel\", \"414\", \"ham\", \"-hau\", \"bob\", \"bocceluta\"]\nif __name__ == '__main__':\n for (mn, mx), value in dct.items():\n print(mn, mx, value)\n\n ans = dict()\n for char in set(\"\".join(lst)):\n ans[char] = [word for word in lst if char in word]\n\n print(len(ans.keys()))\n for char in set(x for x in \"\".join(lst) if x.isalpha() == False and x.isnumeric() == False):\n del ans[char]\n print(ans)\n\n print(len(ans.keys()))\n\n for key, value in ans.items():\n for word in value:\n for (mn, mx), value in dct.items():\n if mn < len(word) < mx:\n print(dct[(mn, mx)], end=\" \")\n print()\n","repo_name":"cyber-gh/AI-lab","sub_path":"lab1/prob10.py","file_name":"prob10.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21299494805","text":"n = int(input())\r\nseat = []\r\n\r\nfor i in range(n):\r\n s = list(map(int, input().split()))\r\n for j in range(n):\r\n if s[j] == 2:\r\n sung = (i, j)\r\n elif s[j] == 5:\r\n pro = (i, j)\r\n seat.append(s)\r\n\r\ndis = ((sung[0] - pro[0]) ** 2 + (sung[1] - pro[1]) ** 2) # 성규랑 교수 사이의 거리\r\n\r\nif dis >= 25: # 거리가 5이상이면\r\n cnt = 0 # 성규랑 교수님 사이에 있는 행과 열 카운트\r\n for i in range(min(sung[0], pro[0]), max(sung[0], pro[0]) + 1):\r\n for j in range(min(sung[1], pro[1]), max(sung[1], pro[1]) + 1):\r\n if seat[i][j] == 1:\r\n cnt += 1\r\n '''교수의 위치가 (a,b), 성규의 위치가 (c,d)라고 하면 반복문의 수도코드는 다음과 같다. arr은 입력받은 배열이다.\r\n (a-c)**2 + (b-d)**2 >= 25\r\n\r\n for i가 (min(a, c)부터 max(a, c)까지):\r\n for j가 (min(b, d)부터 max(m, d)까지):\r\n arr[i][j]가 1이라면 학생이므로 카운팅'''\r\n \r\n if cnt >= 3: # 만날 수 있음\r\n print('1')\r\n else:\r\n print(\"0\")\r\nelse:\r\n print(\"0\")","repo_name":"rhoeunbin/Algorithm","sub_path":"백준/Silver/18221. 
교수님 저는 취업할래요/교수님 저는 취업할래요.py","file_name":"교수님 저는 취업할래요.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19185714421","text":"# https: // www.cs.cmu.edu/~adamchik/15-121/lectures/Binary % 20Heaps/heaps.html\n# https://www.cs.cmu.edu/~adamchik/15-121/lectures/Binary%20Heaps/code/Heap.java\n\n\nclass MinHeap:\n \"\"\"\n The value of each node is >= the value of its parent\n The min value is at the root\n \"\"\"\n\n def __init__(self, array=[]):\n # import to use list(), check https://docs.python-guide.org/writing/gotchas/\n self.heap = list(array)\n if len(self.heap) > 1:\n self.build_heap()\n\n def build_heap(self):\n last_parent = (len(self.heap) - 2) // 2\n for i in reversed(range(last_parent + 1)):\n self.sift_down(i)\n\n def insert(self, item):\n self.heap.append(item)\n self.sift_up(len(self.heap) - 1)\n\n def delete_min(self):\n if len(self.heap) == 0:\n raise Exception(\"heap is empty.\")\n\n minimum = self.heap[0]\n self.heap[0] = self.heap[-1]\n del self.heap[-1]\n self.sift_down(0)\n return minimum\n\n def sift_up(self, k):\n if k < 0 or k >= len(self.heap):\n return\n tmp = self.heap[k]\n\n while (k-1) // 2 >= 0:\n parent = (k-1) // 2\n\n if self._greater(self.heap[parent], tmp):\n self.heap[k] = self.heap[parent]\n else:\n break\n k = parent\n\n self.heap[k] = tmp\n\n def sift_down(self, k):\n if k < 0 or k >= len(self.heap):\n return\n tmp = self.heap[k]\n size = len(self.heap)\n\n while 2 * k + 1 < size:\n child = 2 * k + 1\n\n # pick the smaller child\n if child < size - 1 and self._greater(self.heap[child], self.heap[child+1]):\n child += 1\n if self._greater(tmp, self.heap[child]):\n self.heap[k] = self.heap[child]\n else:\n break\n k = child\n self.heap[k] = tmp\n\n def _greater(self, a, b):\n return a > b\n\n\ndef main():\n test = [6, 7, 12, 10, 15, 17]\n test_heap = MinHeap(test)\n print(test_heap.heap)\n test_heap.insert(5)\n print(test_heap.heap)\n\n test = [3, 1, 6, 5, 2, 4]\n print(test)\n test_heap2 = MinHeap(test)\n print(test_heap2.heap)\n for _ in range(len(test)):\n print(\"min: {}\".format(test_heap2.delete_min()))\n print(\"heap: {}\".format(test_heap2.heap))\n\n# expected output:\n#\n# [6, 7, 12, 10, 15, 17]\n# [5, 7, 6, 10, 15, 17, 12]\n# [3, 1, 6, 5, 2, 4]\n# [1, 2, 4, 5, 3, 6]\n# min: 1\n# heap: [2, 3, 4, 5, 6]\n# min: 2\n# heap: [3, 5, 4, 6]\n# min: 3\n# heap: [4, 5, 6]\n# min: 4\n# heap: [5, 6]\n# min: 5\n# heap: [6]\n# min: 6\n# heap: []\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pololee/oj-leetcode","sub_path":"companies/square/p218/MinHeap.py","file_name":"MinHeap.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36042169020","text":"\n\n\ndef clear_words(words):\n words_cleared=[]\n for word in words:\n if ',.:;!?-'.find(word[len(word)-1])>=0:\n word_cleared=word[:len(word)-1]\n if len(word_cleared)>0:\n words_cleared.append(word_cleared.lower())\n else:\n words_cleared.append(word.lower())\n \n return words_cleared\n\n\ndef add_statistic(statistic,word):\n if len(statistic):\n for record in statistic:\n if record[0]==word:\n record[1]+=1\n return\n statistic.append([word,1]) \n \n \ndef create_statistic(words):\n statistic=[]\n \n for word in clear_words(words):\n add_statistic(statistic,word) \n \n return statistic\n \ndef search_statistic_min(statistic):\n min_index=0\n min_value=statistic[min_index][1]\n \n for index in 
range(len(statistic)):\n if statistic[index][1]<min_value:\n min_index=index\n min_value=statistic[index][1]\n\n return min_value\n \n\n\ndef task():\n \n words=input(\"Enter text\").split()\n \n statistic=create_statistic(words)\n\n min_repeat=search_statistic_min(statistic)\n \n\n result=[]\n \n for record in statistic:\n if record[1]==min_repeat:\n result.append(record[0])\n \n result.sort()\n print(result)\n \ntask()\n\n\n\n\n\n\n\n\n\n","repo_name":"igortereshchenko/python","sub_path":"words_statistic.py","file_name":"words_statistic.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39674898348","text":"from __future__ import annotations\n\nimport random\nimport shutil\n\nfrom argparse import ArgumentParser\nfrom copy import copy\nfrom dataclasses import dataclass, fields, is_dataclass\nfrom math import floor, pi\nfrom pathlib import Path\n\n\n@dataclass\nclass Config:\n data_dir: Path\n output_dir: Path\n train_split: float\n\n\ndef partition_dataset(config: Config):\n random.seed(round(pi * 1e5))\n\n # Safely delete pre-existing output directory.\n if config.output_dir.exists():\n assert all(map(lambda x: x.is_dir() and x.name in [\"train\", \"test\"], config.output_dir.iterdir()))\n shutil.rmtree(config.output_dir)\n config.output_dir.mkdir(parents=True)\n\n for class_dir in sorted(config.data_dir.glob(\"*/\")):\n print(\"\\n{}/{}\".format(class_dir.parent.name, class_dir.name))\n\n train_dir, test_dir = prepare_output_dirs(config.output_dir, class_dir)\n\n class_files = sorted(class_dir.glob(\"*.png\"))\n train_files, test_files = train_test_split(class_files, config.train_split)\n\n print(f\" total: {len(class_files):>4d}\")\n print(f\" train: {len(train_files):>4d}\")\n print(f\" test: {len(test_files):>4d}\")\n\n for idx, file_path in enumerate(train_files, start=1):\n shutil.copyfile(file_path, train_dir / f\"{idx:04d}.png\")\n\n for idx, file_path in enumerate(test_files, start=1):\n shutil.copyfile(file_path, test_dir / f\"{idx:04d}.png\")\n\n\ndef prepare_output_dirs(output_dir: Path, class_dir: Path):\n # Build the class subdirectories under the given output directory.\n train_dir = output_dir / \"train\" / class_dir.name\n test_dir = output_dir / \"test\" / class_dir.name\n\n train_dir.mkdir(parents=True)\n test_dir.mkdir(parents=True)\n\n return train_dir, test_dir\n\n\ndef train_test_split(data, train_split):\n data = copy(data)\n n_train = floor(len(data) * train_split)\n\n random.shuffle(data)\n train_data, test_data = data[:n_train], data[n_train:]\n\n return train_data, test_data\n\n\ndef parse_args():\n parser = ArgumentParser()\n\n parser.add_argument(\"--data-dir\", type=Path, required=True, help=\"path to unpartitioned data directory\")\n parser.add_argument(\"--output-dir\", type=Path, required=True, help=\"path to output directory\")\n parser.add_argument(\"--train-split\", type=float, default=0.8, help=\"fraction of data used for training dataset\")\n\n return parser.parse_args()\n\n\ndef dataclass_from_dict(klass, data):\n if is_dataclass(klass):\n field_types = {f.name: f.type for f in fields(klass)}\n assert all([k in field_types.keys() for k in data])\n return klass(**{k: dataclass_from_dict(field_types[k], data[k]) for k in data})\n\n return data\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n config = dataclass_from_dict(Config, vars(args))\n 
partition_dataset(config)\n","repo_name":"thomasjo/nemo-redux","sub_path":"src/preprocessing/partition_dataset.py","file_name":"partition_dataset.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14375728016","text":"#!/usr/bin/python3\n\n\"\"\"\nSimple HTTP Server.\n\nFeatures:\n- Listens for incoming HTTP requests.\n- Handles basic HTTP methods (GET).\n- Serves requested files from the \"resources\" directory.\n- Sends appropriate HTTP responses (200 OK, 404 Not Found).\n- Handles multiple requests at the same time.\n\"\"\"\n\n# Libraries ------------------------------------------------------------------>\n\nimport os\nimport sys\nfrom signal import SIGINT, signal\nfrom socket import AF_INET, SOCK_STREAM, socket\nfrom threading import Thread\nfrom typing import List\n\n# Authorship ----------------------------------------------------------------->\n\n__author__ = \"Kirill Shkirov\"\n__license__ = \"MIT\"\n__email__ = \"kichkiro@student.42firenze.it\"\n__slack__ = \"kichkiro\"\n__status__ = \"Finished\"\n\n# Functions ------------------------------------------------------------------>\n\n\ndef signal_handler(_signal_received: int, __frame: object) -> None:\n \"\"\"\n Handles the SIGINT signal.\n If SIGINT signal is received, the server will be stopped.\n\n Params\n --------------------------------------------------------------------\n None\n\n Returns\n --------------------------------------------------------------------\n None\n \"\"\"\n print(\"\\nServer has been stopped.\")\n os.kill(os.getpid(), 9)\n\n\ndef connection_handler(conn_socket: socket) -> None:\n \"\"\"\n Handles the connection with a client.\n\n Params\n --------------------------------------------------------------------\n conn_socket : socket\n The socket object representing the connection.\n\n Returns:\n --------------------------------------------------------------------\n None\n \"\"\"\n message: str\n resource: str\n outputdata: str\n\n try:\n message = conn_socket.recv(1024).decode()\n resource = message.split()[1]\n\n # Open and read the file if it exists -------------------------------->\n with open(f\"resources/{resource}\", \"r\", encoding=\"utf-8\") as file:\n outputdata = file.read()\n\n # Send HTTP headers into socket -------------------------------------->\n conn_socket.send(\"HTTP/1.1 200 OK\\r\\n\".encode())\n conn_socket.send(\"Content-Type: text/html\\r\\n\".encode())\n conn_socket.send(f\"Content-Length: {len(outputdata)}\\r\\n\".encode())\n conn_socket.send(\"\\r\\n\".encode())\n\n # Send the content of the requested file to the client --------------->\n for _i, data in enumerate(outputdata):\n conn_socket.send(data.encode())\n conn_socket.send(\"\\r\\n\".encode())\n print(f\"Resource: {resource} - has been sent.\")\n\n conn_socket.close()\n except (IOError, IndexError):\n # Send response message for file not found --------------------------->\n conn_socket.send(\"HTTP/1.1 404 Not Found\\r\\n\".encode())\n conn_socket.close()\n print(\"File not found.\")\n\n\ndef main(argv: List[str]) -> None:\n \"\"\"\n Main function that starts the server.\n\n Params\n --------------------------------------------------------------------\n argv: List[str]\n List of command line arguments.\n\n Returns\n --------------------------------------------------------------------\n None\n \"\"\"\n server_socket: socket\n server_port: int\n conn_socket: socket\n addr: tuple\n thread: Thread\n\n if len(argv) != 2:\n 
print(f\"Usage: {argv[0]} <port>\")\n sys.exit()\n\n # Prepare a sever socket ------------------------------------------------->\n server_socket = socket(AF_INET, SOCK_STREAM)\n server_port = int(argv[1])\n server_socket.bind(('', server_port))\n server_socket.listen(1)\n\n while True:\n\n # Establish the connection ------------------------------------------->\n print('\\nReady to serve...')\n conn_socket, addr = server_socket.accept()\n print(f\"Connection from {addr} has been established.\")\n\n # Creating new thread for handle the connection ---------------------->\n thread = Thread(target=connection_handler, args=(conn_socket, ))\n thread.start()\n\n\nif __name__ == \"__main__\":\n signal(SIGINT, signal_handler)\n main(sys.argv)\n","repo_name":"kichkiro/python_socket","sub_path":"web_server/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69962080585","text":"n = int(input(\"Enter a number: \"))\nrev = 0\nsum = 0\ntemp = n\nwhile temp != 0:\n rem = temp % 10\n rev = rev * 10 + rem\n sum += rem\n temp //= 10\n\nprint(\"\\nReverse of \", n, \"is\", rev)\nprint(\"Sum of digits is\", sum)\n","repo_name":"athuld/PythonPrograms","sub_path":"RecordPrograms/program2.py","file_name":"program2.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27896270493","text":"# Import necessary modules\r\nfrom colorama import Fore, Back, Style, init\r\n\r\n# Initialize colorama\r\ninit(strip=False)\r\ninit(autoreset=True)\r\n\r\n# Define a class for map coloring\r\nclass map_coloring():\r\n # Colors to be used for coloring the map\r\n colors = [Fore.RED+'Red', Fore.GREEN+'Green',\r\n Fore.YELLOW+'Yellow', Fore.MAGENTA+'Violet']\r\n\r\n # Define the map\r\n states = ['A', 'B', 'C', 'D', 'E', 'F', 'G']\r\n neighbors = {}\r\n neighbors['A'] = ['B', 'C', 'D']\r\n neighbors['B'] = ['A', 'C']\r\n neighbors['C'] = ['A', 'B', 'D', 'E']\r\n neighbors['D'] = ['A', 'C', 'F', 'E']\r\n neighbors['E'] = ['F', 'C', 'D']\r\n neighbors['F'] = ['E', 'D', 'G']\r\n neighbors['G'] = ['F']\r\n\r\n # Dictionary to store the colors of each state\r\n colors_of_states = {}\r\n\r\n # Function to print the map\r\n def print_graph(self):\r\n \"\"\"\r\n This function prints the map with its neighbors.\r\n \"\"\"\r\n for key in self.neighbors:\r\n print(Fore.CYAN + key + Fore.WHITE + ' -> ', self.neighbors[key])\r\n\r\n # Function to check if a color can be assigned to a state\r\n def promising(self, state, color):\r\n \"\"\"\r\n This function checks if a color can be assigned to a state by checking if any of its neighbors\r\n have already been assigned the same color.\r\n\r\n Args:\r\n state (str): The state to be checked.\r\n color (str): The color to be assigned to the state.\r\n\r\n Returns:\r\n bool: True if the color can be assigned to the state, False otherwise.\r\n \"\"\"\r\n for neighbor in self.neighbors.get(state):\r\n color_of_neighbor = self.colors_of_states.get(neighbor)\r\n if color_of_neighbor == color:\r\n return False\r\n return True\r\n\r\n # Function to get a color for a state\r\n def get_color_for_state(self, state):\r\n \"\"\"\r\n This function gets a color for a state by checking if the color can be assigned to the state.\r\n\r\n Args:\r\n state (str): The state to be assigned a color.\r\n\r\n Returns:\r\n str: The color to be assigned to the state.\r\n \"\"\"\r\n for color 
in self.colors:\r\n if self.promising(state, color):\r\n return color\r\n\r\n # Function to start the map coloring process\r\n def start(self):\r\n \"\"\"\r\n This function starts the map coloring process by printing the map, assigning colors to each state,\r\n and printing the solution.\r\n \"\"\"\r\n print(Fore.BLUE+\"\\n\\n\\t\\tThe Graph Is \")\r\n self.print_graph()\r\n print(\"\\n\\n\")\r\n for state in self.states:\r\n self.colors_of_states[state] = self.get_color_for_state(state)\r\n print(\r\n f\"Color Used For State {state} is {self.colors_of_states[state]}\")\r\n print(Fore.BLUE+\"\\n\\n\\t\\tThe Solution Is - \")\r\n for key in self.colors_of_states:\r\n print(Fore.BLUE+key + Fore.WHITE +\r\n ' -> ', self.colors_of_states[key])\r\n\r\n# Create an instance of the map_coloring class and start the map coloring process\r\ntemp = map_coloring()\r\ntemp.start()","repo_name":"Aditi069/Sem6","sub_path":"AILAB/Ass5_1.py","file_name":"Ass5_1.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30593391007","text":"import os\nimport random\nfrom crm import settings\nfrom django.shortcuts import render,HttpResponse,redirect\nfrom crm01 import form\nfrom crm01 import models\nfrom django.views import View\nfrom PIL import Image, ImageDraw, ImageFont\nfrom django.http import JsonResponse\nfrom django.urls import reverse\nfrom django.contrib import auth\nfrom crm01 import page\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\n@login_required # authentication decorator\ndef index(request):\n return render(request,'index.html')\n\n# registration\ndef register(request):\n if request.method == 'GET':\n form_obj = form.UserForm()\n return render(request,'register.html',{'form_obj':form_obj})\n else:\n form_obj = form.UserForm(request.POST)\n if form_obj.is_valid():\n # print(form_obj.cleaned_data) # drop the field that is no longer needed\n data=form_obj.cleaned_data\n data.pop('r_password')\n models.UserInfo.objects.create_user(**data) # create_user stores the password hashed\n return redirect('login')\n else:\n return render(request,'register.html',{'form_obj':form_obj})\n# login\ndef login(request):\n response_msg = {'code':None,'msg':None}\n if request.method == 'GET':\n return render(request,'login.html')\n else:\n username = request.POST.get('username')\n password = request.POST.get('password')\n valid_bode = request.POST.get('valid_code')\n # first check that the captcha is correct\n if valid_bode.upper() == request.session.get('valid_str').upper():\n user_obj = auth.authenticate(username=username,password=password)\n if user_obj:\n auth.login(request,user_obj)\n response_msg['code']=1000\n response_msg['msg'] = '登录成功'\n # return redirect('index')\n else:\n response_msg['code'] = 1002\n response_msg['msg'] ='用户名或者密码错误'\n else:\n response_msg['code'] = 1001\n response_msg['msg'] = '验证码输入错误'\n return JsonResponse(response_msg)\n# logout\ndef logout(request):\n auth.logout(request)\n return redirect('login')\n\n# def starter(request):\n# pass\n# captcha image\ndef get_valid_img(request):\n\n def get_random_color():\n return (random.randint(0,255),random.randint(0,255),random.randint(0,255))\n img_obj = Image.new('RGB', (200, 34), get_random_color()) # image object\n draw_obj = ImageDraw.Draw(img_obj) # create a pen (draw) object from the image object\n font_path = os.path.join(settings.BASE_DIR,'statics/font/NAUERT__.TTF') # load the font; note some font files cannot render digits, so switch fonts if digits do not show up\n font_obj = ImageFont.truetype(font_path,16) # create the font object\n sum_str = '' # this string is the captcha text the user has to type\n for i in range(6):\n a = random.choice([str(random.randint(0,9)), 
chr(random.randint(97,122)), chr(random.randint(65,90))]) #4 a 5 D 6 S\n sum_str += a\n print(sum_str)\n draw_obj.text((64,10),sum_str,fill=get_random_color(),font=font_obj) # draw the text onto the image with the pen object\n\n width=200\n height=34\n # add noise lines\n for i in range(5): # 5 lines added\n # one coordinate is a point; two points define a line\n x1=random.randint(0,width)\n x2=random.randint(0,width)\n y1=random.randint(0,height)\n y2=random.randint(0,height)\n draw_obj.line((x1,y1,x2,y2),fill=get_random_color())\n # # add noise dots\n for i in range(10):\n # add the dots\n draw_obj.point([random.randint(0, width), random.randint(0, height)], fill=get_random_color())\n # below, draw tiny arcs that look like dots\n x = random.randint(0, width)\n y = random.randint(0, height)\n draw_obj.arc((x, y, x + 4, y + 4), 0, 90, fill=get_random_color()) # (x, y) is the arc's start corner, (x + 4, y + 4) its end corner\n\n from io import BytesIO\n f = BytesIO() # in-memory file handle\n img_obj.save(f,'png') #\n data = f.getvalue()\n\n # way 1 to store the captcha: assign it to a global variable (a simple test)\n # global valid_str\n # valid_str = sum_str\n # way 2: store the captcha in each user's own session; sessions have many other uses as well\n request.session['valid_str'] = sum_str\n return HttpResponse(data)\n\n# view all public (unassigned) customer records\n# def customers(request):\n# all_customers = models.Customer.objects.all()\n# return render(request,'customers.html',{'all_customers':all_customers})\n\n# view all public customers, i.e. 'consultant is empty'\n# def customers(request):\nclass CustomerView(View):# CBV that lists public customers and handles batch operations\n def get(self,request):\n wd = request.GET.get('wd','')\n condition = request.GET.get('condition','')\n\n all_customers = models.Customer.objects.filter(consultant__isnull=True)\n if wd:\n q = Q()\n q.children.append((condition,wd))\n all_customers = all_customers.filter(q)\n current_page_num = request.GET.get('page',1)\n per_page_counts = 10 # 10 records per page\n page_number = 5 # show 5 page numbers in total\n total_count = all_customers.count()\n page_obj = page.PageNation(request.path,current_page_num,request,total_count,per_page_counts,page_number)\n all_customers = all_customers.order_by('-pk')[page_obj.start_num:page_obj.end_num]\n ret_html = page_obj.page_html()\n return render(request,'customers.html',{'all_customers':all_customers,'ret_html':ret_html})\n def post(self,request):\n self.data = request.POST.getlist('selected_id')\n action = request.POST.get('action')\n if hasattr(self,action):\n func = getattr(self,action)\n if callable(func):\n ret = func(request)\n if ret:\n return ret\n return redirect('customers')\n else:\n return HttpResponse('NO')\n else:\n return HttpResponse('NO')\n # batch delete\n def batch_del(self,request):\n models.Customer.objects.filter(pk__in=self.data).delete()\n # batch update\n def batch_update(self,request):\n models.Customer.objects.filter(pk__in=self.data).update(name='大白')\n # batch: move public customers to private\n def batch_g_s(self,request):\n batch_customer = models.Customer.objects.filter(pk__in=self.data)\n res = []\n for i in batch_customer:\n if i.consultant:\n res.append(i)\n else:\n i.consultant =request.user\n i.save()\n if res:\n res_str = ','.join([(i.qq + ':'+i.name) for i in res])\n return HttpResponse(res_str+'信息被其他人选走')\n models.Customer.objects.filter(pk__in=self.data).update(consultant=request.user)\n\n# view the customers owned by the currently logged-in user\n# def mycustomers(request):\n# mycustomers = models.Customer.objects.filter(consultant=request.user)\n# return render(request,'mycustomers.html',{'mycustomers':mycustomers})\nclass MyCustomerView(View):# CBV that lists my customers and handles batch operations\n def get(self,request):\n wd = request.GET.get('wd','')\n condition = request.GET.get('condition','')\n\n all_my_customers = models.Customer.objects.filter(consultant=request.user)\n if wd:\n q = Q()\n q.children.append((condition,wd))\n 
all_my_customers = all_my_customers.filter(q)\n current_page_num = request.GET.get('page',1)\n per_page_counts = 10 # 10 records per page\n page_number = 5 # show 5 page numbers in total\n total_count = all_my_customers.count()\n page_obj = page.PageNation(request.path,current_page_num,request,total_count,per_page_counts,page_number)\n all_my_customers = all_my_customers.order_by('-pk')[page_obj.start_num:page_obj.end_num]\n ret_html = page_obj.page_html()\n return render(request,'mycustomers.html',{'all_my_customers':all_my_customers,'ret_html':ret_html})\n def post(self,request):\n self.data = request.POST.getlist('selected_id')\n action = request.POST.get('action')\n if hasattr(self,action):\n func = getattr(self,action)\n if callable(func):\n func(request)\n return redirect('mycustomers')\n else:\n return HttpResponse('NO')\n else:\n return HttpResponse('NO')\n\n # batch delete\n def batch_del(self, request):\n models.Customer.objects.filter(pk__in=self.data).delete()\n\n # batch update\n def batch_update(self, request):\n models.Customer.objects.filter(pk__in=self.data).update(name='小白')\n\n # batch: move private customers back to public\n def batch_s_g(self, request):\n models.Customer.objects.filter(pk__in=self.data).update(consultant=None)\n\n# customer follow-up records\nclass Follow_Up_Records(View):\n def get(self,request,pk=None):\n wd = request.GET.get('wd','')\n condition = request.GET.get('condition','')\n if pk:\n all_records = models.ConsultRecord.objects.filter(customer_id=pk)\n else:\n all_records = models.ConsultRecord.objects.filter(consultant=request.user)\n if wd:\n q = Q()\n q.children.append((condition,wd))\n all_records = all_records.filter(q)\n current_page_num = request.GET.get('page',1)\n per_page_counts = 10\n page_number = 5\n total_count = all_records.count()\n page_obj = page.PageNation(request.path,current_page_num,request,total_count,per_page_counts,page_number)\n all_records = all_records.order_by('-pk')[page_obj.start_num:page_obj.end_num]\n ret_html = page_obj.page_html()\n return render(request, 'Follow_Up_Records.html', {'all_records':all_records, 'ret_html':ret_html})\n\n def post(self, request):\n self.data = request.POST.getlist('selected_id')\n action = request.POST.get('action')\n if hasattr(self, action):\n func = getattr(self, action)\n if callable(func):\n func(request)\n return redirect('Follow_Up_Records')\n else:\n return HttpResponse('NO')\n else:\n return HttpResponse('NO')\n\n # batch delete\n\n def batch_del(self, request):\n models.Customer.objects.filter(pk__in=self.data).delete()\n\n # batch update follow-up info\n\n def batch_update(self, request):\n models.Customer.objects.filter(pk__in=self.data).update(name='小白')\n\nclass AddFollow_Up_Records(View):\n def get(self,request):\n form_obj = form.ConsultRecordModelForm(request)\n return render(request, 'addFollow_Up_Records.html', {'form_obj':form_obj})\n def post(self,request):\n form_obj = form.ConsultRecordModelForm(request,request.POST)\n if form_obj.is_valid():\n form_obj.save()\n return redirect('Follow_Up_Records')\n else:\n return render(request, 'Follow_Up_Records.html', {'form_obj':form_obj})\n\nclass EditFollow_Up_Records(View):\n def get(self,request,pk):\n followobj = models.ConsultRecord.objects.filter(pk=pk).first() # .first() returns the object itself\n form_obj = form.ConsultRecordModelForm(request,instance=followobj)\n return render(request, 'editFollow.html', {'form_obj':form_obj})\n def post(self,request,pk):\n followobj = models.ConsultRecord.objects.filter(pk=pk).first()\n form_obj = form.ConsultRecordModelForm(request,request.POST,instance=followobj)# QueryDict\n if form_obj.is_valid():\n form_obj.save()\n return 
redirect('Follow_Up_Records')\n else:\n return render(request, 'Follow_Up_Records.html', {'form_obj':form_obj})\n\n\n\n# def test(request):\n# wd = request.GET.get('wd','')\n# condition = request.GET.get('condition','')\n# condition = condition + '__condition'\n# current_page_num = request.GET.get('page',1)\n# if wd:\n# # all_data = models.Customer.objects.filter(Q(qq__contains=wd) | Q(name__contains=wd))\n# q = Q()\n# q .connector='or'\n# q.children.append((condition,wd))\n# q.children.append(('qq_name_contains','王'))\n# all_data = models.Customer.objects.filter(q)\n# else:\n# all_data = models.Customer.objects.all()\n# per_page_counts = 10 # 10 records per page\n# page_number = 5 # show 5 page numbers in total\n# all_data = models.Customer.objects.all()\n# total_count = all_data.count()\n# # ret_html,start_num,end_num = page.pagenation(request.path, current_page_num,total_count,per_page_counts,page_number)\n# p_obj = page.PageNation(request.path, current_page_num,total_count,per_page_counts,page_number)\n# ret_html = p_obj.page_html()\n# all_data = models.Customer.objects.all()[p_obj.start_num:p_obj.end_num]\n# return render(request,'test.html',{'all_data':all_data,'ret_html':ret_html})\n\n# create\n# created via ModelForm\nclass AddCustomer(View):\n def get(self,request):\n form_obj = form.CustomerModelForm()\n return render(request,'addcustomer.html',{'form_obj':form_obj})\n def post(self,request):\n form_obj = form.CustomerModelForm(request.POST)\n if form_obj.is_valid():\n form_obj.save() # when adding data, the ModelForm instantiates an object; once is_valid() passes, calling save() persists it automatically\n return redirect('customers')\n else:\n return render(request,'addcustomer.html',{'form_obj':form_obj})\n\n# instructor's version\n# class AddCustomer(View):\n# # fetch the add page\n# def get(self,request):\n# form_obj = form.CustomerModelForm()\n# return render(request,'addcustomer.html',{'form_obj':form_obj})\n# def post(self,request):\n# form_obj = form.CustomerModelForm(request.POST)\n# #{'qq':'11111','name':'xiaohei'}\n# if form_obj.is_valid():\n# form_obj.save()\n# return redirect('customers')\n# else:\n# return render(request,'addcustomer.html',{'form_obj':form_obj})\n\n# edit\nclass Editcustomer(View):\n def get(self,request,pk):\n custome_obj = models.Customer.objects.filter(pk=pk).first()\n form_obj = form.CustomerModelForm(instance=custome_obj)\n return render(request,'editcustomer.html',{'form_obj':form_obj})\n def post(self,request,pk):\n custome_obj = models.Customer.objects.filter(pk=pk).first()\n form_obj = form.CustomerModelForm(request.POST,instance=custome_obj)\n if form_obj.is_valid():\n form_obj.save() # once is_valid() passes, calling save() persists the changes automatically\n return redirect('mycustomers')\n else:\n return render(request,'editcustomer.html',{'form_obj':form_obj})\n# delete\nclass Delcustomer(View):\n def get(self,request,pk):\n models.Customer.objects.filter(pk=pk).delete()\n return redirect('customers')\n\n","repo_name":"Lwk1071373366/zdh","sub_path":"crm/crm01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6778654140","text":"if __name__ != \"__main__\":\n raise Exception(\"run must be executed directly\")\n\nimport os.path\nimport subprocess\nimport sys\n\ndef pathsetup():\n prefix = sys.path[0]\n trailer = \"/sectracker_test\"\n if not os.path.exists(prefix + \"/run.py\") \\\n or prefix[-len(trailer):] != trailer:\n raise Exception(\"cannot find path to ourselves\")\n path = sys.path[:]\n path[0] = prefix[:-len(trailer)]\n return (prefix, path)\n(ourpath, 
pythonpath) = pathsetup()\nos.chdir(ourpath + \"/..\")\n\nenv = {}\nenv.update(os.environ)\nenv[\"PYTHONPATH\"] = \":\".join(pythonpath)\n\nfiles = os.listdir(ourpath)\nfiles.sort()\nerrors = False\nfor name in files:\n if name[-3:] != \".py\" or name == \"run.py\":\n continue\n fullpath = \"%s/%s\" % (ourpath, name)\n print(\"* Running\", name)\n p = subprocess.Popen((\"python3\", \"--\", fullpath), env=env)\n ret = p.wait()\n if ret != 0:\n print(\"Test exited with status\", ret)\n print()\n errors = errors or ret != 0\nif errors:\n print(\"ERROR: some tests aborted with errors\")\n sys.exit(1)\n","repo_name":"CVEDB/security-tracker","sub_path":"lib/python/sectracker_test/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72942157065","text":"import glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nfrom time import perf_counter as timerpc\nimport yaml\n\nfrom floris.tools import FlorisInterface, UncertaintyInterface\nfrom flasc.visualization import plot_floris_layout\n\n\ndef load_floris(wake_model=\"cc\", wd_std=0.0):\n \"\"\"Load a FlorisInterface object for the wind farm at hand.\n\n Args:\n wake_model (str, optional): The wake model that FLORIS should use. Common\n options are 'cc', 'gch', 'jensen' and 'turbopark'. Defaults to \"cc\".\n operation_modes (array, optional): Array or list of integers denoting each\n turbine's operation mode. When None is specified, will assume each turbine\n is in its first operation mode (0). Defaults to None.\n wd_std (float, optional): Uncertainty; standard deviation in the inflow\n wind direction in degrees. Defaults to 0.0 deg meaning no uncertainty.\n\n Returns:\n FlorisInterface: Floris object.\n \"\"\"\n\n # Use the local FLORIS GCH/CC model for the wake model settings\n root_path = os.path.dirname(os.path.abspath(__file__))\n fn = os.path.join(root_path, \"{:s}.yaml\".format(wake_model))\n\n # Now assign the turbine locations and information\n layout_x = [1630.222, 1176.733, 816.389, 755.938, 0.0, 1142.24, 1553.102]\n layout_y = [0.0, 297.357, 123.431, 575.544, 647.779, 772.262, 504.711]\n\n # Initialize FLORIS model and format appropriately\n fi = FlorisInterface(fn)\n fi.reinitialize(\n layout_x=layout_x,\n layout_y=layout_y,\n )\n\n # Add uncertainty\n if wd_std > 0.01:\n unc_options = {\n \"std_wd\": wd_std, # Standard deviation for inflow wind direction (deg)\n \"pmf_res\": 1.0, # Resolution over which to calculate angles (deg)\n \"pdf_cutoff\": 0.995, # Probability density function cut-off (-)\n }\n fi = UncertaintyInterface(fi, unc_options=unc_options)\n\n return fi\n\n\nif __name__ == \"__main__\":\n # Load and time the FLORIS model\n t0 = timerpc()\n fi = load_floris()\n print(\"Time spent to load the FLORIS model: {:.2f} s.\".format(timerpc() - t0))\n\n # Show layout\n plot_floris_layout(fi, plot_terrain=False)\n plt.show()\n","repo_name":"NREL/flasc_cookiecutter_template","sub_path":"{{cookiecutter.project_slug}}/python/{{cookiecutter.project_slug}}/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13340075331","text":"from decimal import Decimal\nfrom typing import List\n\n\nclass Config:\n def __init__(self,\n api_key: str,\n api_secret: str,\n log_level: str,\n symbol: str,\n main_coins_list: List[str],\n database_file: str,\n 
database_update_delay_secs: int,\n database_update_start_record: int,\n main_coin_amount_to_use: Decimal,\n market_analyse_delay: int,\n market_depth: int,\n sell_when_percent_drop: Decimal,\n rise_watch_percent_level: Decimal):\n self.ApiKey = api_key\n self.ApiSecret = api_secret\n self.LogLevel = log_level\n self.Symbol = symbol\n self.MainCoins = main_coins_list\n self.DatabaseFile = database_file\n self.DatabaseUpdateDelay = database_update_delay_secs\n self.DatabaseUpdateStartRecord = database_update_start_record\n self.MainCoinAmountToUse = main_coin_amount_to_use\n self.MarketAnalyseDelay = market_analyse_delay\n self.MarketDepth = market_depth\n self.SellWhenPercentDrop = sell_when_percent_drop\n self.RiseWatchPercentLevel = rise_watch_percent_level\n","repo_name":"mateusz-szczyrzyca/simple-binance-ta-bot","sub_path":"common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27730386350","text":"from __future__ import annotations\nfrom core.state import State\nfrom core import util\nfrom copy import deepcopy\nfrom typing import List, Optional, Tuple, Sequence\nimport random\nimport numpy as np\n\n\ndef generate_grid(grid_w: int,\n grid_h: int,\n ls: float,\n sd: float,\n seed: int):\n\n grid_size = grid_w * grid_h\n Y, X = np.unravel_index(list(range(grid_size)), (grid_h, grid_w))\n\n def sq_exp_kernel(a: np.ndarray,\n b: np.ndarray,\n length_scale: float,\n sd: float):\n return sd**2 * np.exp(-np.sum((a - b)**2)/2/length_scale**2)\n\n # Compute K\n K = np.zeros((grid_size, grid_size))\n for a_1d in range(grid_size):\n for b_1d in range(grid_size):\n a_coord = np.array([Y[a_1d], X[a_1d]])\n b_coord = np.array([Y[b_1d], X[b_1d]])\n K[a_1d, b_1d] = sq_exp_kernel(a_coord, b_coord, ls, sd)\n\n # Sample from multivariate G.\n gen = np.random.default_rng(seed=seed)\n sample = gen.multivariate_normal(mean=np.zeros(grid_size), cov=K)\n sample = sample.reshape((grid_h, grid_w))\n return np.exp(sample)\n\n\nclass TreasureHuntConfig(object):\n def __init__(self,\n rew_pl1: np.ndarray,\n rew_pl2: np.ndarray,\n time_horizon: int):\n self.grid_h, self.grid_w = rew_pl1.shape\n self.rew_pl1 = rew_pl1\n self.rew_pl2 = rew_pl2\n self.time_horizon = time_horizon\n assert rew_pl1.shape == rew_pl2.shape\n\n\nclass TreasureHuntState(State):\n def __init__(self,\n config: TreasureHuntConfig,\n locs_pl1,\n locs_pl2,\n ):\n\n self.config = config\n self.locs_pl1 = locs_pl1\n self.locs_pl2 = locs_pl2\n\n self.rew_pl1, self.rew_pl2 = None, None\n\n @staticmethod\n def init_state(config: TreasureHuntConfig):\n center = (config.grid_h//2, config.grid_w//2)\n return TreasureHuntState(config, [center], [center])\n\n def is_terminal(self):\n return len(self.locs_pl2) == self.config.time_horizon\n\n def player_to_move(self):\n if self.is_terminal():\n return None\n if len(self.locs_pl2) < len(self.locs_pl1):\n return util.PLAYER2\n elif len(self.locs_pl2) == len(self.locs_pl1):\n return util.PLAYER1\n else:\n assert False\n\n def next_state(self, action):\n p = self.player_to_move()\n\n if p == util.PLAYER1:\n next_loc = self.next_loc(self.locs_pl1[-1], action)\n self.locs_pl1.append(next_loc)\n else:\n next_loc = self.next_loc(self.locs_pl2[-1], action)\n self.locs_pl2.append(next_loc)\n\n def actions_and_probs(self):\n return ['u', 'd', 'l', 'r', '-'], None\n\n def rewards(self) -> Tuple[float, float]:\n if not self.is_terminal():\n return None\n else:\n rew_pl1 = 
self.get_accum_payoffs(util.PLAYER1)\n rew_pl2 = self.get_accum_payoffs(util.PLAYER2)\n\n return (rew_pl1, rew_pl2)\n\n def __str__(self):\n return 'Locs pl1: ' + str(self.locs_pl1) + '\\n' + \\\n 'Locs pl2: ' + str(self.locs_pl2)\n\n def __repr__(self):\n return self.__str__()\n\n def dup(self):\n return TreasureHuntState(self.config,\n deepcopy(self.locs_pl1),\n deepcopy(self.locs_pl2))\n\n def next_loc(self, cur_loc, action):\n next_loc = list(cur_loc)\n if action == 'u':\n if cur_loc[0] > 0:\n next_loc[0] -= 1\n elif action == 'd':\n if cur_loc[0] < self.config.grid_h-1:\n next_loc[0] += 1\n elif action == 'l':\n if cur_loc[1] > 0:\n next_loc[1] -= 1\n elif action == 'r':\n if cur_loc[1] < self.config.grid_w-1:\n next_loc[1] += 1\n elif action == '-':\n pass\n return tuple(next_loc)\n\n def get_accum_payoffs(self, pid) -> float:\n if self.rew_pl1 is None:\n self.rew_pl1 = self.get_cum_rewards(\n [self.locs_pl1, self.locs_pl2], self.config.rew_pl1)\n self.rew_pl2 = self.get_cum_rewards(\n [self.locs_pl1, self.locs_pl2], self.config.rew_pl2)\n\n if pid == util.PLAYER1:\n return self.rew_pl1\n elif pid == util.PLAYER2:\n return self.rew_pl2\n else:\n assert False\n\n def get_cum_rewards(self, locs_lists: List[List], rew_grid) -> float:\n t = 0.\n\n # TODO: set criterion to either use set or list to extract rewards.\n use_set = True\n\n if use_set == True:\n d = set()\n for locs in locs_lists:\n for (y, x) in locs:\n if (y, x) not in d:\n t += rew_grid[y, x]\n d.add((y, x))\n return t\n else:\n visited = [[False for i in range(self.config.grid_w)]\n for j in range(self.config.grid_h)]\n for locs in locs_lists:\n for (y, x) in locs:\n if not visited[y][x]:\n t += rew_grid[y, x]\n visited[y][x] = True\n return t\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n leader_payoff = generate_grid(7, 7, 2, 0.1, 142857)\n follower_payoff = generate_grid(7, 7, 2, 0.1, 428571)\n\n config = TreasureHuntConfig(leader_payoff, follower_payoff, 5)\n s = TreasureHuntState.init_state(config)\n\n while(not s.is_terminal()):\n # Take random action\n acts, probs = s.actions_and_probs()\n\n if s.player_to_move() == util.CHANCE:\n act_id = random.choices(range(len(acts)), weights=probs)[0]\n else:\n act_id = random.randint(0, len(acts)-1)\n action = acts[act_id]\n\n s.next_state(action)\n\n print(s)\n print('---')\n\n print(s.rewards())\n","repo_name":"lingchunkai/learn-epf-sefce","sub_path":"games/treasure_hunt.py","file_name":"treasure_hunt.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2708060067","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\napp_name = 'files'\r\n\r\nurlpatterns = [\r\n path('nfts', views.nfts, name=\"nfts\"),\r\n path('<int:pk>/nfts-update', views.nfts_update, name=\"nfts_update\"),\r\n path('<int:pk>/nfts-delete', views.nfts_delete, name=\"nfts_delete\"),\r\n]","repo_name":"Diallo75012/Personal-NFT-info-tracker","sub_path":"files/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19612367074","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 18-6-14 9:44 AM\n# @Author : Shark\n# @Site : \n# @File : config.py.py\n# @Software: PyCharm\n\nimport logging\n\nDB_HOST = '172.16.1.210'\nDB_PORT = '3306'\nDB_NAME = 'ttd'\nDB_USER = 'ttd'\nDB_PASSWD = 'ROOT@mwteck123'\n\n# DB_HOST = '172.16.1.208'\n# DB_PORT = '3306'\n# DB_NAME = 'ttd_20180808005604'\n# DB_USER = 'root'\n# DB_PASSWD = 'root'\n\n# DB_HOST2 = '172.16.1.208'\n# DB_PORT2 = '3306'\n# DB_NAME2 = 'ttd_20180722005601'\n# DB_USER2 = 'root'\n# DB_PASSWD2 = 'root'\n\nADDRESS_JSON = 'var_s/address.json'\n\nTEST_RUN = False\nCREATE_BY = 'znh'\nSAVE_ID = 50\n\nBAIDU_MAP_AK = (\n 'O7hgHKGFW4DGh0n8TxaOxOdoiFLnrZGI', # company API KEY\n 'W7KW0i88jYz1x0OxX5hZ9VEcHD8sxkcC', # 吕强 API KEY\n 'yQBIj4sdILLqW5iLZEvNdPMUseFB3ozG', # 徐恒 API KEY\n 'he4grC14QgGg1u4GzSVB6xbUmDIGIe8b', # myfifi_2 API KEY\n 'pF2sGa7vwXzj3NiOrayli0aytoUDn9fg', # myfifi_3 API KEY\n 'xsF1V1EKn14M71yH0MYPk9sfmbER1KcG', # myfifi_4 API KEY\n '0B8yZIqc4GvE3UCOzUVzHtLCm3sSqlwk', # myfifi_55 API KEY\n 'YNw3saMUSu6jUM7l3jvOL5mhhLukcqcZ'\n)\n\n\ndef log():\n # create the logger\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # create a handler for the console\n consoleHandler = logging.StreamHandler()\n consoleHandler.setLevel(logging.DEBUG)\n\n # and one for the log file\n fileHandler = logging.FileHandler('var_s/log.txt')\n fileHandler.setLevel(logging.NOTSET)\n\n # output format\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n consoleHandler.setFormatter(formatter)\n fileHandler.setFormatter(formatter)\n\n # attach the handlers to the logger\n logger.addHandler(consoleHandler)\n logger.addHandler(fileHandler)\n\n return logger\n","repo_name":"POISON-B/Study-Notes","sub_path":"data_import-V1.5/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73643039625","text":"#!/usr/bin/python3\n\n# basic gcd\ndef gcd(a, b):\n if a < 0:\n return gcd(-a, b)\n if b < 0:\n return gcd(a, -b)\n while b:\n a, b = b, a % b\n return a\n\n\n# basic lcm\ndef lcm(a, b):\n if a < 0:\n return lcm(-a, b)\n if b < 0:\n return lcm(a, -b)\n return a // gcd(a, b) * b # avoids overflow\n\n\n# returns (gcd(a, b), x, y) such that gcd(a,b) = ax + by\ndef egcd(a, b):\n if a < 0:\n r, x, y = egcd(-a, b) # recurse on |a|, then flip the sign of x\n x *= -1\n return r, x, y\n if b < 0:\n r, x, y = egcd(a, -b) # recurse on |b|, then flip the sign of y\n y *= -1\n return r, x, y\n u = y = 0\n v = x = 1\n while b:\n q = a // b\n a, b = b, a % b\n x, y, u, v = u, v, x - q * u, y - q * v\n return a, x, y\n\n\n# Chinese remainder theorem, simple version.\n# Given a, b, n, m, find z which simultaneously satisfies\n# z = a (mod m) and z = b (mod n).\n# This z, when it exists, is unique mod lcm(n,m).\n# If such z does not exist, then return -1.\n# z exists iff a == b (mod gcd(m,n))\ndef CRT(a, m, b, n):\n g, s, t = egcd(m, n)\n l = m // g * n\n r = a % g\n if b % g != r:\n return -1\n if g == 1:\n s = s % abs(l)\n t = t % abs(l)\n r1 = s * b % l\n r2 = t * a % l\n r1 = 
r1 * m % l\n r2 = r2 * n % l\n return (r1 + r2) % l\n else:\n return g * CRT(a // g, m // g, b // g, n // g) + r\n\n\n# Chinese remainder theorem, extended version.\n# Given a[K] and n[K], find z so that, for every i,\n# z = a[i] (mod n[i])\n# The solution is unique mod lcm( n[i] ) when it exists.\n# The existence criteria is just the extended version of what it is above.\ndef CRT_ext(a, n):\n ret = a[0]\n l = n[0]\n for i in range(1, len(a)):\n ret = CRT(ret, l, a[i], n[i])\n l = lcm(l, n[i])\n if ret == -1:\n return -1\n return ret\n\n\n# END\n\nimport sys\n\n\ndef test_gcd():\n print(\"test gcd\", file=sys.stderr)\n if gcd( 4, 7) != 1:\n print('gcd( 4, 7) != 1', file=sys.stderr)\n if gcd( 0, 7) != 7:\n print('gcd( 0, 7) != 7', file=sys.stderr)\n if gcd(14, 7) != 7:\n print('gcd(14, 7) != 7', file=sys.stderr)\n if gcd(14,21) != 7:\n print('gcd(14,21) != 7', file=sys.stderr)\n if gcd(-7, 7) != 7:\n print('gcd(-7, 7) != 7', file=sys.stderr)\n if gcd(-7,-7) != 7:\n print('gcd(-7,-7) != 7', file=sys.stderr)\n\n\ndef test_lcm():\n print(\"test lcm\", file=sys.stderr)\n for a in range(1, 100):\n for b in range(1, 100):\n if gcd(a, b) * lcm(a, b) != a * b:\n print('lcm * gcd != product:', a, b, file=sys.stderr)\n\n\ndef test_egcd():\n print(\"test egcd\", file=sys.stderr)\n for a in range(0, 100):\n for b in range(0, 100):\n g, s, t = egcd(a, b)\n if gcd(a, b) != g:\n print('gcd != egcd:', a, b, file=sys.stderr)\n if s * a + b * t != g:\n print('egcd s*a + t*b = g fail:', a, b, file=sys.stderr)\n\n\ndef test_CRT():\n print(\"test CRT\", file=sys.stderr)\n for m in range(1, 100):\n for n in range(1, 100):\n for a in range(0, m):\n for b in range(0, n):\n z = CRT(a, m, b, n)\n g = gcd(m, n)\n if a % g != b % g and z != -1:\n print('CRT gave an impossible solution: z =', a, 'mod', m, 'and z =', b, 'mod', n, file=sys.stderr)\n if a % g == b % g:\n if z % m != a or z % n != b:\n print('CRT gave a bad solution: z =', a, 'mod', m, 'and z =', b, 'mod', n, file=sys.stderr)\n\n\ndef test_CRT_ext():\n print(\"test CRT_ext\", file=sys.stderr)\n solution = 155\n n = [9, 4, 55, 77, 10, 166]\n a = [solution % ni for ni in n]\n r = CRT_ext(a, n)\n if solution != r:\n print('CRT_ext gave the wrong solution (test #1)', file=sys.stderr)\n\n n = [2, 6]\n a = [0, 1]\n r = CRT_ext(a, n)\n if r != -1:\n print('CRT_ext did not indicate failure (test #2)', file=sys.stderr)\n\n\nif __name__ == '__main__' and not hasattr(sys, 'ps1'):\n test_gcd()\n test_lcm()\n test_egcd()\n # test_CRT() # Note, this is slow\n test_CRT_ext()\n","repo_name":"kerrickstaley/python-contest-lib","sub_path":"Algebra.py","file_name":"Algebra.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25443348266","text":"# -*- encoding: utf-8 -*-\n\nfrom odoo import api, fields, models, _, tools\nfrom odoo.exceptions import UserError\nfrom xml.dom.minidom import parseString\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass AccountMove(models.Model):\n _inherit = 'account.move'\n\n \"\"\"\n @api.model_cr\n def init(self):\n cr = self._cr\n cr.execute(\"delete from ir_ui_view where name='report_invoice_document_inherit_sale';\")\n \"\"\"\n \n @api.depends('journal_id')\n def _get_address_issued_invoice(self):\n for rec in self:\n rec.address_issued_id = rec.journal_id.address_invoice_company_id or \\\n (rec.journal_id.company2_id and rec.journal_id.company2_id.partner_id) or \\\n rec.journal_id.company_id.partner_id or False\n 
rec.company_emitter_id = rec.journal_id.company2_id or rec.journal_id.company_id or False\n\n def _get_xml_file_content(self):\n attachment = self.env['ir.attachment'].search([('res_model', '=', 'account.move'), ('res_id', '=', self.id), ('name', 'ilike', '.xml')], limit=1)\n if not attachment:\n return False\n try:\n file_path = self.env['ir.attachment']._full_path('checklist').replace('checklist','') + attachment.store_fname\n attach_file = open(file_path, 'rb')\n xml_data = attach_file.read()\n attach_file.close()\n return xml_data\n except:\n _logger.error(\"No se pudo leer el archivo XML adjunto a esta factura, favor de revisar...\")\n return False\n \n \n \n @api.depends('cfdi_state','ref','state')\n def _get_uuid_from_attachment(self):\n for rec in self:\n rec.sat_serie = False\n rec.sat_uuid = False\n rec.sat_folio = False\n xml_data = rec._get_xml_file_content()\n if xml_data:\n #try:\n arch_xml = parseString(xml_data)\n is_xml_signed = arch_xml.getElementsByTagName('tfd:TimbreFiscalDigital')\n if is_xml_signed:\n xvalue = arch_xml.getElementsByTagName('tfd:TimbreFiscalDigital')[0]\n yvalue = arch_xml.getElementsByTagName('cfdi:Comprobante')[0] \n timbre = xvalue.attributes['UUID'].value\n serie, folio = False, False\n try:\n serie = yvalue.attributes['serie'].value\n except:\n pass\n try:\n folio = yvalue.attributes['folio'].value\n except:\n pass\n res = self.search([('sat_uuid', '=', timbre),('id','!=',rec.id),('company_id','=',rec.company_id.id)])\n if res:\n raise UserError(_(\"Error ! La factura ya se encuentra registrada en el sistema y no puede tener registro duplicado.\\n\\nLa factura con Folio Fiscal %s se encuentra registrada en el registro %s - Referencia: %s - ID: %s\")%(timbre, res.name, res.ref, res.id))\n rec.sat_uuid = timbre\n if serie:\n rec.sat_serie = serie\n if folio:\n rec.sat_folio = folio\n _logger.info(\"CFDI (Archivo XML) con UUID %s procesado exitosamente...\" % timbre)\n #except:\n # _logger.info(\"Ocurrió un error al intentar tomar los datos del archivo XML\")\n # pass\n\n\n \n @api.depends(\n 'line_ids.debit',\n 'line_ids.credit',\n 'line_ids.currency_id',\n 'line_ids.amount_currency',\n 'line_ids.amount_residual',\n 'line_ids.amount_residual_currency',\n 'line_ids.payment_id.state')\n def _compute_amount(self):\n super(AccountMove, self)._compute_amount()\n for rec in self.filtered(lambda w: w.is_invoice(include_receipts=True)):\n rec.amount_discount = sum(line.amount_discount for line in self.invoice_line_ids) or 0.0\n rec.amount_subtotal = sum(line.amount_subtotal for line in self.invoice_line_ids) or 0.0\n \n \n @api.depends('amount_total','currency_id')\n def _get_amount_to_text(self):\n for rec in self:\n currency = rec.currency_id.name.upper()\n # M.N. = Moneda Nacional (National Currency)\n # M.E. = Moneda Extranjera (Foreign Currency)\n currency_type = 'M.N.' 
if currency == 'MXN' else 'M.E.'\n # Split integer and decimal part\n amount_i, amount_d = divmod(rec.amount_total, 1)\n amount_d = round(amount_d, 2)\n amount_d = int(amount_d * 100)\n words = rec.company_id.currency_id.with_context(lang=self.env.user.partner_id.lang or 'es_ES').amount_to_text(amount_i).upper()\n invoice_words = '%(words)s %(amount_d)02d/100 %(curr_t)s' % dict(\n words=words, amount_d=amount_d, curr_t=currency_type)\n if currency != 'MXN':\n invoice_words = invoice_words.replace('PESOS',currency)\n invoice_words = invoice_words.replace('M.N.','M.E.')\n rec.amount_to_text = invoice_words\n \n \n @api.depends('invoice_line_ids.product_id')\n def _compute_deposit_invoice(self):\n ICPSudo = self.env['ir.config_parameter'].sudo()\n deposit_product_id_setting = int(ICPSudo.get_param('sale.default_deposit_product_id','0'))\n for rec in self:\n if deposit_product_id_setting and rec.invoice_line_ids:\n rec.deposit_invoice = bool(any(l.product_id.id == deposit_product_id_setting for l in rec.invoice_line_ids))\n else:\n rec.deposit_invoice = False\n \n @api.depends('cfdi_state','state','cancelation_request_ids')\n def _get_state_cancelation_requests(self):\n for rec in self:\n if not rec.cancelation_request_ids:\n rec.mailbox_state = \"no\"\n else:\n last_request = rec.cancelation_request_ids[-1]\n rec.mailbox_state = last_request.state\n \n use_for_cfdi = fields.Boolean(string=\"Usar para CFDIs\", related=\"journal_id.use_for_cfdi\")\n uso_cfdi_id = fields.Many2one('sat.uso.cfdi', 'Uso CFDI', readonly=True, \n states={'draft': [('readonly', False)]},\n required=False) \n type_document_id = fields.Many2one('sat.tipo.comprobante', 'Tipo de Comprobante', required=False) \n metodo_pago_id = fields.Many2one('sat.metodo.pago','Metodo de Pago', readonly=True, \n states={'draft': [('readonly', False)]},\n help='Metodo de Pago Requerido por el SAT')\n type_rel_cfdi_ids = fields.One2many('sat.invoice.cfdi.rel', 'invoice_rel_id', 'CFDI Relacionados') \n tipo_cambio = fields.Float('Tipo Cambio', digits=(14,6), default=1.0)\n type_rel_id = fields.Many2one('sat.tipo.relacion.cfdi','Relacion CFDI',\n readonly=True, states={'draft': [('readonly', False)]})\n\n deposit_invoice = fields.Boolean('Anticipo', compute='_compute_deposit_invoice', store=True)\n\n deposit_invoice_used = fields.Boolean('Anticipo Relacionado', help='Indica que esta factura ya fue relacionada en el XML de otra.', copy=False )\n\n deposit_invoice_rel_id = fields.Many2one('account.move','Factura Relacionada como Anticipo', help='Indica a que factura fue relacionada en su XML.', copy=False )\n cfdi_complemento = fields.Selection([('na','No Aplica')], string='Complemento CFDI', \n readonly=True, states={'draft': [('readonly', False)]}, \n copy=False, store=True, default=lambda a:'na', required=True)\n\n \n amount_discount = fields.Monetary(string='Total Descuento', store=True, \n readonly=True, compute='_compute_amount',\n tracking=True)\n amount_subtotal = fields.Monetary(string='Total Subtotal', store=True, readonly=True, compute='_compute_amount')\n \n sat_uuid = fields.Char(compute='_get_uuid_from_attachment', string=\"CFDI UUID\", required=False, store=True, index=True)\n sat_folio = fields.Char(compute='_get_uuid_from_attachment', string=\"CFDI Folio\", required=False, store=True, index=True)\n sat_serie = fields.Char(compute='_get_uuid_from_attachment', string=\"CFDI Serie\", required=False, store=True, index=True)\n \n #### Columns ###################\n\n fname_invoice = fields.Char(compute='_get_fname_invoice', 
string='Nombre de Archivo',\n help='Nombre usado para el archivo XML del CFDI')\n invoice_datetime = fields.Datetime(string='Fecha CFDI', readonly=True, \n states={'draft': [('readonly', False)]}, copy=False,\n help=\"Deje vacío para usar la fecha actual\")\n date_invoice_tz = fields.Datetime(string='Fecha Factura con TZ', compute='_get_date_invoice_tz',\n help='Fecha de la Factura con Zona Horaria', copy=False)\n amount_to_text = fields.Char(compute='_get_amount_to_text', string='Monto en texto', store=True,\n help='Monto de la Factura en texto')\n # Campos donde se guardara la info de CFDI\n \n no_certificado = fields.Char(string='No. Certificado', size=64, help='Number of serie of certificate used for the invoice')\n certificado = fields.Text('Certificado', help='Certificate used in the invoice')\n sello = fields.Text('Sello', help='Digital Stamp')\n cadena_original = fields.Text('Cadena Original.')\n \n \n \n cfdi_cbb = fields.Binary(string='Código Bidimensional', readonly=True, copy=False)\n cfdi_sello = fields.Text('CFDI Sello', readonly=True, help='Sign assigned by the SAT', copy=False)\n cfdi_no_certificado = fields.Char('CFDI No. Certificado', size=32, readonly=True,\n help='Serial Number of the Certificate', copy=False)\n cfdi_cadena_original = fields.Text(string='Cadena Original', readonly=True, copy=False)\n cfdi_fecha_timbrado = fields.Datetime(string='Fecha Timbrado', readonly=True, copy=False)\n cfdi_fecha_cancelacion = fields.Datetime(string='Fecha Cancelación', readonly=True,\n help='Fecha cuando la factura es Cancelada', copy=False)\n cfdi_folio_fiscal = fields.Char(string='Folio Fiscal (UUID)', size=64, readonly=True,\n help='Folio Fiscal del Comprobante CFDI, también llamado UUID', copy=False)\n\n cfdi_state = fields.Selection([('draft','Pendiente'),\n ('xml_unsigned','XML a Timbrar'),\n ('xml_signed','XML Timbrado'),\n ('pdf','PDF del CFDI'),\n ('sent', 'CFDI Enviado por Correo'),\n ('in_process_cancel','CFDI En proceso cancelación'),\n ('uuid_no_cancel','CFDI no Cancelable'),\n ('uuid_no_cancel_by_customer','CFDI no Cancelable por el Cliente'),\n ('cancel','Cancelado'),\n ], string=\"Estado CFDI\", readonly=True, default='draft',\n help='Estado del Proceso para generar el Comprobante Fiscal', copy=False)\n \n \n cancelation_request_ids = fields.One2many('account.move.cancelation.record', 'invoice_id', 'Solicitudes de Cancelación', copy=False)\n mailbox_state = fields.Selection([('cancel','Cancelacion Solicitud'),\n ('no','Sin Solicitudes'),\n ('process','En Proceso'),\n ('done','Aceptada'),\n ('rejected','Rechazada por el Cliente'),\n ('no_cancel','CFDI no se puede Cancelar')], \n string='Estado Cancelacion', compute=\"_get_state_cancelation_requests\", \n store=True, tracking=True)\n cancel_wht_mailbox = fields.Boolean('Cancelar sin Solicitud', copy=False, tracking=True)\n \n # PENDIENTE => Definir el metodo donde se usaran\n #pdf_file_signed = fields.Binary(string='Archivo PDF Timbrado', readonly=True, help='Archivo XML que se manda a Timbrar al PAC', copy=False)\n #xml_file_no_sign = fields.Binary(string='Archivo XML a Timbrar', readonly=True, help='Archivo XML que se manda a Timbrar al PAC', copy=False)\n #xml_file_signed = fields.Binary(string='Archivo XML Timbrado', readonly=True, help='Archivo XML final (después de timbrado y Addendas)', copy=False)\n xml_file_no_sign_index = fields.Text(string='XML a Timbrar', readonly=True, help='Contenido del Archivo XML que se manda a Timbrar al PAC', copy=False)\n xml_file_signed_index = fields.Text(string='XML Timbrado', 
readonly=True, help='Contenido del Archivo XML final (después de timbrado y Addendas)', copy=False)\n cfdi_last_message = fields.Text(string='Last Message', readonly=True, help='Message generated to upload XML to sign', copy=False)\n xml_acuse_cancelacion = fields.Text('XML Acuse Cancelacion', readonly=True)\n cfdi_pac = fields.Selection([], string='PAC', readonly=True, store=True, copy=False)\n #pac_id = fields.Many2one('params.pac', string='Pac', readonly=True, help='Pac usado para Timbrar la Factura')\n \n ##################################\n pay_method_id = fields.Many2one('pay.method', string='Forma de Pago', readonly=True, \n states={'draft': [('readonly', False)]})\n \n pay_method_ids = fields.Many2many('pay.method', 'account_invoice_pay_method_rel', 'invoice_id', 'pay_method_id', \n readonly=True, states={'draft': [('readonly', False)]},\n string=\"Formas de Pago\")\n \n acc_payment = fields.Many2one('res.partner.bank', string='Cuenta Bancaria', readonly=True, \n states={'draft': [('readonly', False)]},\n help='Is the account with which the client pays the invoice, \\\n if not know which account will used for pay leave empty and \\\n the XML will show \"“Unidentified”\".')\n\n\n address_issued_id = fields.Many2one('res.partner', compute='_get_address_issued_invoice', \n string='Dirección Emisión', store=True,\n help='This address will be used as address that issued for electronic invoice')\n \n company_emitter_id = fields.Many2one('res.company', compute='_get_address_issued_invoice', store=True,\n string='Compañía Emisora', \n help='This company will be used as emitter company in the electronic invoice')\n \n\n ## DESMARCAR COMENTARIO payment_line_ids = fields.One2many('account.payment.invoice', 'invoice_id', 'Pagos', readonly=True)\n l10n_mx_edi_is_required = fields.Boolean(string=\"Dummy\", \n help=\"Este campo es dummy porque lo usa el modulo l10n_mx_edi y afecta en el Template del envio de factura \")\n \n \n \n @api.depends('number', 'journal_id', 'invoice_date')\n def name_get(self):\n result = []\n for rec in self:\n if rec.move_type in ('out_invoice', 'out_refund', 'in_invoice', 'in_refund'):\n if rec.name and rec.journal_id and rec.invoice_date:\n name = rec.name + ' / ' + rec.journal_id.name + ' / ' + rec.invoice_date.isoformat() \n result.append((rec.id, name))\n else:\n if rec.invoice_date:\n name = 'SN' + ' / ' + rec.journal_id.name + ' / ' + rec.invoice_date.isoformat()\n else:\n name = 'SN' + ' / ' + rec.journal_id.name \n result.append((rec.id, name))\n else:\n result.append((rec.id, rec.name))\n return result\n\n\n \n @api.onchange('partner_id')\n def _onchange_partner_id(self):\n res = super(AccountMove, self)._onchange_partner_id()\n self.metodo_pago_id = self.partner_id.commercial_partner_id.property_payment_term_id.id\n self.uso_cfdi_id = self.partner_id.commercial_partner_id.uso_cfdi_id.id\n self.pay_method_id = self.partner_id.commercial_partner_id.pay_method_id.id\n return res\n \n \n @api.onchange('invoice_payment_term_id')\n def _invoice_payment_term_id(self):\n if self.invoice_payment_term_id:\n self.metodo_pago_id = self.invoice_payment_term_id.metodo_pago_id.id\n else:\n self.metodo_pago_id = False\n\n \"\"\"\n def post(self):\n sat_tipo_obj = self.env['sat.tipo.comprobante']\n for rec in self.filtered(lambda w: w.is_invoice(include_receipts=True) and \\\n w.type in ('out_invoice','out_refund') and \\\n w.journal_id.use_for_cfdi):\n if not rec.uso_cfdi_id:\n raise UserError('Error!\\nEl campo Uso CFDI es Obligatorio.')\n tipo_documento = 'I' if rec.type 
== 'out_invoice' else 'E'\n tipo_id = sat_tipo_obj.search([('code','=',tipo_documento)], limit=1)\n rec.type_document_id = tipo_id.id if tipo_id else False\n res = super(AccountMove, self).post()\n return res\n \"\"\"\n\n @api.model\n def create(self, vals):\n res = super(AccountMove, self).create(vals)\n sat_tipo_obj = self.env['sat.tipo.comprobante']\n type_document = res.move_type\n tipo_documento = 'I' if res.move_type == 'out_invoice' else 'E'\n tipo_id = sat_tipo_obj.search([('code','=',tipo_documento)], limit=1)\n res.type_document_id = tipo_id.id if tipo_id else False\n if res.move_type == 'out_invoice':\n if not res.metodo_pago_id:\n if res.invoice_payment_term_id and res.invoice_payment_term_id.metodo_pago_id:\n res.metodo_pago_id = res.invoice_payment_term_id.metodo_pago_id.id\n else:\n metodo_pago_id = self.env.ref('l10n_mx_einvoice.metodo_00', False)\n if metodo_pago_id:\n res.metodo_pago_id = metodo_pago_id.id\n if not res.uso_cfdi_id:\n if res.partner_id and res.partner_id.uso_cfdi_id:\n res.uso_cfdi_id = res.partner_id.uso_cfdi_id.id\n else:\n uso_cfdi_id = self.env.ref('l10n_mx_einvoice.sat_usdo_cfdi_03', False)\n if uso_cfdi_id:\n res.uso_cfdi_id = uso_cfdi_id.id\n if not res.pay_method_id:\n if res.partner_id and res.partner_id.pay_method_id:\n res.pay_method_id = res.partner_id.pay_method_id.id\n else:\n pay_method_id = self.env.ref('l10n_mx_einvoice.pay_method_01', False)\n if pay_method_id:\n res.pay_method_id = pay_method_id.id\n\n return res\n\n \n\nclass AccountMoveLine(models.Model):\n _inherit = 'account.move.line'\n\n \n @api.depends('discount', 'price_unit', 'quantity')\n def _compute_discount_amounts(self):\n for line in self:\n line.amount_subtotal = line.price_unit * line.quantity\n line.amount_discount = (line.discount/100.0) * line.price_unit * line.quantity\n \n amount_discount = fields.Monetary(string='Monto Descuento', store=True, \n compute='_compute_discount_amounts')\n amount_subtotal = fields.Monetary(string='Monto sin Descuento', store=True, \n compute='_compute_discount_amounts')\n \n \n \nclass AccountPaymentTerm(models.Model):\n _inherit ='account.payment.term'\n\n metodo_pago_id = fields.Many2one('sat.metodo.pago','Metodo de Pago', \n help='Metodo de Pago Requerido por el SAT', )\n\n\nclass SatInvoiceCFDIRel(models.Model):\n _name = 'sat.invoice.cfdi.rel'\n _description = 'Relacion de CFDI'\n #_rec_name = 'invoice_id' \n\n payment_state = {\n 'not_paid': 'No Pagada',\n 'in_payment': 'En Proceso de Pago',\n 'paid': 'Pagada',\n 'partial': 'Parcialmente Pagada',\n 'reversed': 'Reversada',\n 'invoicing_legacy': 'Historico'}\n \n @api.depends('invoice_id')\n def _get_name_invoice_id(self):\n payment_state = {\n 'not_paid': 'No Pagada',\n 'in_payment': 'En Proceso de Pago',\n 'paid': 'Pagada',\n 'partial': 'Parcialmente Pagada',\n 'reversed': 'Reversada',\n 'invoicing_legacy': 'Historico'}\n for rec in self:\n _logger.info(\"\")\n rec.name = '%s - %s - %s - %s' % (rec.invoice_id.name, rec.invoice_id.invoice_date, rec.invoice_id.cfdi_folio_fiscal, payment_state[rec.invoice_id.payment_state])\n \n name = fields.Char(string=\"Referencia\",\n compute=\"_get_name_invoice_id\")\n invoice_id = fields.Many2one('account.move', 'Factura', required=True)\n move_name = fields.Char(string=\"Factura #\", related=\"invoice_id.name\")\n date_invoice= fields.Date(string=\"Fecha\", related=\"invoice_id.invoice_date\")\n cfdi_folio_fiscal = fields.Char(string=\"Folio Fiscal\", related=\"invoice_id.cfdi_folio_fiscal\")\n amount_total= fields.Monetary(string=\"Total\", 
related=\"invoice_id.amount_total\")\n currency_id = fields.Many2one('res.currency', string=\"Moneda\", related=\"invoice_id.currency_id\")\n payment_state = fields.Selection(string=\"Estado Pago\", related=\"invoice_id.payment_state\")\n state = fields.Selection(string='Estado', related=\"invoice_id.state\")\n \n invoice_rel_id = fields.Many2one('account.move', 'ID Rel')\n onchange_domain = fields.Boolean('Disparar Dominios', default=True)\n\n\n @api.onchange('onchange_domain')\n def onchange_relation(self):\n\n domain={}\n if self.invoice_rel_id.type_rel_id:\n if self.invoice_rel_id.type_rel_id.code == '07':\n domain.update({'invoice_id':[('deposit_invoice','=',True),\n ('deposit_invoice_used','=',False),\n ('payment_state','!=','not_paid'),\n ('move_type','in',('out_invoice','out_refund')),\n ('cfdi_folio_fiscal','!=',False)]\n }) \n elif self.invoice_rel_id.type_rel_id.code == '04':\n domain.update({'invoice_id':[('move_type','in',('out_invoice','out_refund')),\n ('cfdi_folio_fiscal','!=',False)]\n })\n else:\n domain.update({'invoice_id':[('payment_state','!=','not_paid'),\n ('move_type','in',('out_invoice','out_refund')),\n ('cfdi_folio_fiscal','!=',False)]\n }) \n\n else:\n domain.update({'invoice_id':[('payment_state','!=','not_paid'),\n ('move_type','in',('out_invoice','out_refund')),\n ('cfdi_folio_fiscal','!=',False)]\n })\n #print('domain: ', domain)\n return {'domain': domain}\n \n \nclass AccountMoveCancelationRecord(models.Model):\n _name = 'account.move.cancelation.record'\n _description = 'Solicitud de Cancelacion'\n _rec_name = 'folio_fiscal' \n\n date_request = fields.Datetime('Fecha Solicitud', help='Indica la fecha en la que se realizo la Solicitud de Cancelacion', )\n state = fields.Selection([('cancel','Cancelacion Solicitud'),\n ('process','En Proceso'),\n ('done','Aceptada'),\n ('rejected','Rechazada por el Cliente'),\n ('no_cancel','CFDI no se puede Cancelar')], string='Estado Solicitud')\n invoice_id = fields.Many2one('account.move',string='ID Ref')\n request_ignored = fields.Boolean('Ignorar Solicitud')\n folio_fiscal = fields.Char('Folio Fiscal',size=128)\n message_invisible = fields.Text(\"Mensaje PAC\")\n\n \n def solitud_cancelacion_asincrona(self):\n ## Se deja Abierta la Conexion para la Consulta con el PAC ###\n return {}\n\n \n \n def solitud_cancelacion_consulta_status(self):\n ## Se deja Abierta la Conexion para la Consulta con el PAC ###\n return {}\n \n \n def unlink_me(self):\n self.unlink()\n","repo_name":"daniboy2215/desarrollo-mx","sub_path":"l10n_mx_einvoice/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":25409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24020464871","text":"#!/usr/bin/python3\n\"\"\"This module iterates in a string and finds special characters\nprints them and creates two new lines\"\"\"\n\n\ndef text_indentation(text):\n \"\"\"This fucntion iterates in a string if specified characters\n are found they will get printed and then two new lines\n text (str): text must be type str\"\"\"\n\n if type(text) != str:\n raise TypeError(\"text must be a string\")\n\n for i in text:\n if i == \".\" or i == \"?\" or i == \":\":\n print(f\"{i}\", end=\"\")\n print(\"\")\n print(\"\")\n else:\n print(f\"{i}\", 
end=\"\")\n","repo_name":"yahdielo/holbertonschool-higher_level_programming","sub_path":"python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12498336582","text":"from enum import Enum, auto\n\nADDPRED = SUBPRED = 1\nMULPRED = DIVPRED = 2\nEXPPRED = 3\nFUNCPRED = 4\n\nclass Assoc(Enum):\n NONE = auto()\n LEFT = auto()\n RIGHT = auto()\n\nclass Type(Enum):\n NUMBER = auto()\n OPERATOR = auto()\n FUNCTION = auto()\n VARIABLE = auto()\n CONSTANT = auto()\n LPAREN = auto()\n RPAREN = auto()\n\nclass Token():\n def __init__(self, type, **kwargs):\n self.type = type\n if type == Type.NUMBER:\n self.val = kwargs['val']\n elif type == Type.OPERATOR:\n self.op = kwargs['op']\n if self.op == '+':\n self.pred = ADDPRED\n self.argnum = 2\n self.assoc = Assoc.NONE\n elif self.op == '-':\n self.pred = SUBPRED\n self.argnum = 2\n self.assoc = Assoc.LEFT\n elif self.op == '*':\n self.pred = MULPRED\n self.argnum = 2\n self.assoc = Assoc.NONE\n elif self.op == '/':\n self.pred = DIVPRED\n self.argnum = 2\n self.assoc = Assoc.LEFT\n elif self.op == '^':\n self.pred = EXPPRED\n self.argnum = 2\n self.assoc = Assoc.RIGHT\n else:\n raise ValueError('Not supported operator: ' + self.op)\n elif type == Type.FUNCTION:\n raise ValueError('Functions not yet implemented.')\n elif type == Type.VARIABLE:\n self.name = kwargs['name']\n elif type == Type.CONSTANT:\n self.name = kwargs['name']\n self.val = kwargs['val']\n elif type == Type.RPAREN:\n pass\n elif type == Type.LPAREN:\n pass\n else:\n raise ValueError('Not supported token: ' + str(type))\n\n def tokenize(str):\n raise ValueError('Tokenize Method not implemented yet.')\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()","repo_name":"camelliafangirl/math","sub_path":"mathtoken.py","file_name":"mathtoken.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69919474824","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\nADD = 0b10100000\nLDI = 0b10000010\nPRN = 0b01000111\nHLT = 0b00000001\nMUL = 0b10100010\nPUSH = 0b01000101\nPOP = 0b01000110\nCALL = 0b01010000\nRET = 0b00010001\nCMP = 0b10100111\nJMP = 0b01010100\nJEQ = 0b01010101\nJNE = 0b01010110\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.ram = [0] * 256\n # Register\n self.reg = [0] * 8\n # Program Counter, the address in memory of the current instruction\n self.pc = 0\n # Instruction Register\n self.ir = None\n # Stack pointer\n self.reg[7] = 0xf4\n # Flag Register\n self.fl = 0b0\n \n # Branch table\n self.branchtable = {\n LDI: self.LDI,\n PRN: self.PRN, \n HLT: self.HLT,\n MUL: lambda oper_a, oper_b: self.alu(\"MUL\", oper_a, oper_b),\n ADD: lambda oper_a, oper_b: self.alu(\"ADD\", oper_a, oper_b),\n PUSH: self.PUSH,\n POP: self.POP,\n CALL: self.CALL,\n RET: self.RET,\n CMP: lambda oper_a, oper_b: self.alu(\"CMP\", oper_a, oper_b),\n JMP: self.JMP,\n JEQ: self.JEQ,\n JNE: self.JNE\n }\n\n def load(self, filepath):\n \"\"\"Load a program into memory.\"\"\"\n\n # Next address in memory to insert value into\n address = 0\n\n # Open program to run\n with open(filepath) as program:\n # Iterate over each line in the program\n for line in program:\n # Shorten line down to binary length\n shortend_line = line[:8]\n # Filter out all characters 
except for digits\n            cleaned_line = ''.join(filter(str.isdigit, shortend_line))\n            \n            # Check if the cleaned line is valid in length\n            if len(cleaned_line) == 8:\n                # Insert instruction into memory\n                self.ram_write(int(cleaned_line, 2), address)\n                # Increment to next address\n                address += 1\n        \n        # execute program in memory\n        self.run()\n    \n    def ram_read(self, address):\n        \"\"\"Accepts an address to read and returns the value stored in ram\"\"\"\n        return self.ram[address]\n\n    def ram_write(self, value, address):\n        \"\"\"Accepts a value to write, and an address to write to in ram\"\"\"\n        self.ram[address] = value\n\n    def alu(self, op, reg_a, reg_b):\n        \"\"\"ALU operations.\"\"\"\n\n        if op == \"ADD\":\n            self.reg[reg_a] += self.reg[reg_b]\n        #elif op == \"SUB\": etc\n        elif op == \"MUL\":\n            self.reg[reg_a] *= self.reg[reg_b]\n        elif op == \"CMP\":\n            if self.reg[reg_a] < self.reg[reg_b]:\n                self.fl = 0b00000100\n            elif self.reg[reg_a] > self.reg[reg_b]: \n                self.fl = 0b00000010\n            elif self.reg[reg_a] == self.reg[reg_b]:\n                self.fl = 0b00000001\n        else:\n            raise Exception(\"Unsupported ALU operation\")\n    \n    @property\n    def stack_pointer(self):\n        return self.reg[7]\n\n    @stack_pointer.setter\n    def stack_pointer(self, value):\n        self.reg[7] = value\n\n    def trace(self):\n        \"\"\"\n        Handy function to print out the CPU state. You might want to call this\n        from run() if you need help debugging.\n        \"\"\"\n\n        print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n            self.pc,\n            #self.fl,\n            #self.ie,\n            self.ram_read(self.pc),\n            self.ram_read(self.pc + 1),\n            self.ram_read(self.pc + 2)\n        ), end='')\n\n        for i in range(8):\n            print(\" %02X\" % self.reg[i], end='')\n\n        print()\n    \n    def LDI(self):\n        # Create reference of register address\n        register_address = self.ram[self.pc + 1]\n        # Create reference of register value\n        register_value = self.ram[self.pc + 2]\n        # Select register address and assign it a value\n        self.reg[register_address] = register_value\n\n    def PRN(self):\n        # Retrieve value from register \n        value = self.reg[self.ram_read(self.pc + 1)]\n        # Print value\n        print(f\"Print: {value}\")\n\n    def HLT(self):\n        # Throw SystemExit exception\n        sys.exit()\n    \n    def PUSH(self):\n        # Decrement stack pointer\n        self.stack_pointer -= 1\n        # Copy value from the named register and place it into the stack\n        self.ram[self.stack_pointer] = self.reg[self.ram[self.pc + 1]]\n    \n    def POP(self):\n        # Copy value from stack and place it into the named register\n        self.reg[self.ram[self.pc + 1]] = self.ram[self.stack_pointer]\n        # Increment stack pointer\n        self.stack_pointer += 1\n    \n    def CALL(self):\n        # Push the address of the instruction after CALL, then jump to the\n        # address stored in the given register\n        self.stack_pointer -= 1\n        self.ram[self.stack_pointer] = self.pc + 2\n        self.pc = self.reg[self.ram[self.pc + 1]]\n\n    def RET(self):\n        self.pc = self.ram[self.stack_pointer]\n        self.stack_pointer += 1\n    \n    def JMP(self):\n        self.pc = self.reg[self.ram[self.pc + 1]]\n    \n    def JEQ(self):\n        if self.fl == 1:\n            self.JMP()\n        else:\n            self.pc += 2\n\n    def JNE(self):\n        if self.fl != 1:\n            self.JMP()\n        else:\n            self.pc += 2\n\n    def run(self):\n        \"\"\"Run the CPU.\"\"\"\n        while True:\n            # Retrieve op code from memory and store it in the instruction register\n            self.ir = self.ram_read(self.pc)\n            sets_pc = (self.ir & 0b00010000) >> 4\n            # Create reference of potential operands\n            oper_a = self.ram[self.pc + 1]\n            oper_b = self.ram[self.pc + 2]\n            # Bitshift and MASK op code to check if it requires the ALU\n            is_alu_op = (self.ir & 0b00100000) >> 5\n\n            if is_alu_op:\n                self.branchtable[self.ir](oper_a, oper_b)\n            else:\n                self.branchtable[self.ir]()\n\n            if not sets_pc:\n                self.pc += ((self.ir & 0b11000000) >> 6) + 1\n
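# Illustrative helper (a sketch, not from the original repo) showing the
# LS-8 opcode layout exploited in run() above: the top two bits give the
# operand count, bit 5 flags ALU instructions, and bit 4 flags instructions
# that set the PC themselves.
def decode_fields(opcode):
    operand_count = (opcode & 0b11000000) >> 6
    is_alu_op = (opcode & 0b00100000) >> 5
    sets_pc = (opcode & 0b00010000) >> 4
    return operand_count, is_alu_op, sets_pc

# decode_fields(0b10100010) -> (2, 1, 0), i.e. MUL takes two operands,
# uses the ALU and does not set the PC itself.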
\n","repo_name":"Fractured2K/Computer-Architecture","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23690705668","text":"import psutil\nimport platform\nimport time\nfrom nonebot import on_regex\nfrom nonebot.typing import T_State\nfrom nonebot.adapters.onebot.v11 import GroupMessageEvent,Bot,Message\n\n'''\nwriten by 萌新源 at 2022/12/24\n本插件需要psutil拓展库,没有的请使用命令 pip install psutil 进行安装\n本插件主要功能是获取系统各项指标状态,以及硬件信息,操作系统等\n命令:系统状态\n'''\nsystem_msg = on_regex(pattern = r'^系统信息$')\n\nmem = psutil.virtual_memory()\n# 系统总计内存\nzj = round(float(mem.total) / 1024 / 1024 / 1024, 1)\n# 系统已经使用内存\nysy = round(float(mem.used) / 1024 / 1024 / 1024, 1)\n# 系统空闲内存\nkx = round(float(mem.free) / 1024 / 1024 / 1024, 1)\n# CPU逻辑核心数量\ncore = psutil.cpu_count()\n# CPU物理核心数量\ncore2 = psutil.cpu_count(logical=False)\n# 内存占用量\npercent = mem.percent\n\n#网速获取\nsent_before = psutil.net_io_counters().bytes_sent # 已发送的流量\nrecv_before = psutil.net_io_counters().bytes_recv # 已接收的流量\ntime.sleep(1)\nsent_now = psutil.net_io_counters().bytes_sent\nrecv_now = psutil.net_io_counters().bytes_recv\nsent = (sent_now - sent_before) / 1024 # 算出1秒后的差值\nrecv = (recv_now - recv_before) / 1024\n\nrun_time = time.strftime(\"[%Y-%m-%d %H:%M:%S]\", time.localtime())\nupload_speed = \"上传:{0}KB/s\".format(\"%.2f\" % sent)\ndownload_speed = \"下载:{0}KB/s\".format(\"%.2f\" % recv)\ncore_1 = f'CPU逻辑核心数:{core}'\ncore_2 = f'CPU物理核心数:{core2}'\ntotal_nc = f'系统总计内存:{zj}GB'\nused_nc = f'系统已使用内存:{ysy}GB'\nfree_nc = f'系统空闲内存:{kx}GB'\npercent_nc = f'内存占比{percent}'\n\n#获取操作系统\nsystem = platform.platform()\nsystem = f'操作系统:{system}'\n\nresult = f'{run_time}\\n{upload_speed}\\n{download_speed}\\n{core_1}\\n{core_2}\\n{total_nc}\\n{used_nc}\\n{free_nc}\\n{percent_nc}\\n{system}'\n\n\n@system_msg.handle()\nasync def yy(bot: Bot, event: GroupMessageEvent, state: T_State):\n\n # at_ = f\"[CQ:at,qq={event.get_user_id()}]\"\n await system_msg.send(Message(result))\n\n\n","repo_name":"mengxinyuan638/cici-bot","sub_path":"辞辞源代码/system_type.py","file_name":"system_type.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"44326976192","text":"import requests\nimport json\nimport os\nimport time\nfrom flask_socketio import SocketIO\nfrom flask_cors import CORS\n\nfrom flask import Flask, send_from_directory, json, session\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv()) \n\n\nOS_TOKEN = os.getenv('OS_KEY')\nETHERSCAN_TOKEN = os.getenv('ETHERSCAN_TOKEN')\nOPENAI_TOKEN = os.getenv('OPENAI_KEY')\n\n#import web3\n\n############################################################\n\n''' Initiating Flask App '''\n\napp = Flask(__name__, static_folder='./build/static')\n\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\nsocketio = SocketIO(\n app,\n cors_allowed_origins=\"*\",\n json=json,\n manage_session=False\n)\n\n\n@app.route('/', defaults={\"filename\": \"index.html\"})\n@app.route('/<path:filename>')\ndef index(filename):\n return send_from_directory('./build', filename)\n\n\n#############################################################\n\n''' Opensea URLs '''\n\nRETRIEVE_ASSET_URL_1 = \"https://api.opensea.io/api/v1/assets?owner=\"\nRETRIEVE_ASSET_URL_2 = \"&order_direction=desc&limit=50&include_orders=false\"\n\n##############################################################\n\n''' 
Etherscan Urls'''\n\nETHERSCAN_MAINNET_URL = 'https://api.etherscan.io/'\nGAS_ORACLE_URL = 'https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey=' + ETHERSCAN_TOKEN\n\n##############################################################\n\n''' Openai Urls '''\n\nGET_AI = 'https://api.openai.com/v1/models/{model}'\nPOST_AI = 'https://api.openai.com/v1/completions'\n\n\n\n##############################################################\n\n\n''' Api Headers '''\n\nos_headers = {\n\n \"Accept\": \"application/json\",\n \"X-API-KEY\": OS_TOKEN\n}\n\n\n\n##############################################################\n\n''' Functions '''\n\ndef show_owner_nfts(owner, headers):\n url = RETRIEVE_ASSET_URL_1 + owner + RETRIEVE_ASSET_URL_2\n response = requests.get(url, headers=headers)\n assets = response.json()['assets']\n num = 0\n\n image = assets[0]['image_url']\n name = assets[0]['name']\n names = []\n images = []\n\n print(assets[0]['image_url'])\n print(name)\n images.append(image)\n \n for i in range(len(assets)):\n num += 1\n print(\"this is i: \" + str(i))\n print(str(num) + \": \" + str(assets[i]['name']))\n images.append(assets[i]['image_url'])\n names.append(assets[i]['name'])\n\n result = []\n result.append(images)\n result.append(names)\n result.append(len(assets))\n return result\n\n\ndef getGasPrice(url):\n response = requests.get(url)\n #print(response.json())\n safeGas = response.json()['result']['SafeGasPrice']\n proposedGas = response.json()['result']['ProposeGasPrice']\n fastGas = response.json()['result']['FastGasPrice']\n\n print('Safe Gas: ' + safeGas)\n print('Proposed Gas: ' + proposedGas)\n print('Fast Gas: ' + fastGas)\n\n gas = {\n 'safe': safeGas,\n 'propose': proposedGas,\n 'fast': fastGas\n }\n\n return gas\n\n\n\n\n##############################################################\n\n\nCONTRACT = \"0xdfd9d33aabbdf759c78a96189a97a5fa76a8d0e0\"\nOG_CONTRACT = \"0xb4f62bbc6e6098b4e6b6ade94684e0fb4e2a79e3\"\n\n\n##############################################################\n\n''' Sockets '''\n\n@socketio.on('connect')\ndef on_connect():\n print('Somone connected!')\n on_gas()\n\n@socketio.on('disconnect')\ndef on_disconnect():\n print('Someone disconnected!')\n\n@socketio.on('user')\ndef on_login(data):\n print('user logged in')\n print(data)\n feed_content = show_owner_nfts(data['userName'], os_headers)\n send_data = [feed_content[0], feed_content[1], feed_content[2]]\n \n #send back the feed information\n socketio.emit('feed', send_data, broadcastT=True, include_self=False)\n\n@socketio.on('feed')\ndef on_feed():\n print('feed being called')\n\n@socketio.on('gas')\ndef on_gas():\n print('gas being called')\n send_data = getGasPrice(GAS_ORACLE_URL)\n socketio.emit('gas', send_data, include_self=True)\n \n\n\n##############################################################\n\n\n\nif __name__ == \"__main__\":\n# Note that we don't call app.run anymore. 
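# A self-contained sketch (helper name is an assumption, not part of this app)
# of the Etherscan gas-oracle request that getGasPrice() above performs; the
# JSON 'result' object carries SafeGasPrice / ProposeGasPrice / FastGasPrice.
import requests

def fetch_gas_prices(api_key):
    url = ('https://api.etherscan.io/api?module=gastracker'
           '&action=gasoracle&apikey=' + api_key)
    result = requests.get(url, timeout=10).json()['result']
    return {'safe': result['SafeGasPrice'],
            'propose': result['ProposeGasPrice'],
            'fast': result['FastGasPrice']}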
We call socketio.run with app arg\n #db.create_all()\n #getGasPrice(GAS_ORACLE_URL)\n socketio.run(\n app,\n host=os.getenv('IP', '0.0.0.0'),\n port=8081 if os.getenv('C9_PORT') else int(os.getenv('PORT', 8081)),\n )\n\n \n\n \n","repo_name":"0xKashDev/SilvIO","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29148269293","text":"import os\nfrom typing import Iterable, List, Tuple, TextIO\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass Cell:\n value: int\n called: bool = False\n\n\nclass Board:\n rows: List[List[Cell]]\n\n def __init__(self) -> None:\n self.rows = []\n\n def add_row(self, row: Iterable[int]) -> None:\n new_row = [Cell(v) for v in row]\n if self.rows:\n assert len(new_row) == len(self.rows[0])\n self.rows.append(new_row)\n\n def mark(self, value: int) -> None:\n for row in self.rows:\n for cell in row:\n if cell.value == value:\n cell.called = True\n\n @property\n def win(self) -> bool:\n if any(all(cell.called for cell in row) for row in self.rows):\n return True\n else:\n return any(\n all(self.rows[i][j].called for i in range(len(self.rows)))\n for j in range(len(self.rows[0]))\n )\n\n @property\n def score(self) -> int:\n return sum(cell.value for row in self.rows for cell in row if not cell.called)\n\n\ndef parse_input(input_file: TextIO) -> Tuple[List[int], List[Board]]:\n draw = [int(d) for d in next(input_file).split(\",\")]\n next(input_file) # skip blank line\n board = Board()\n boards = []\n for line in input_file:\n if line.strip():\n board.add_row(int(v) for v in line.split())\n else:\n boards.append(board)\n board = Board()\n if board.rows:\n boards.append(board)\n return draw, boards\n\n\ndef get_winning_score(draw: Iterable[int], boards: List[Board]) -> int:\n for d in draw:\n for board in boards:\n board.mark(d)\n if board.win:\n return board.score * d\n raise RuntimeError(\"No winners\")\n\n\ndef get_losing_score(draw: Iterable[int], boards: List[Board]) -> int:\n winners = 0\n for d in draw:\n for board in boards:\n if not board.win:\n board.mark(d)\n if board.win:\n winners += 1\n if winners == len(boards):\n return board.score * d\n raise RuntimeError(\"No losers\")\n\n\nif __name__ == \"__main__\":\n os.chdir(os.path.dirname(__file__))\n with open(\"../data/day_04.in\") as input_file:\n draw, boards = parse_input(input_file)\n print(\"wining score:\", get_winning_score(draw, boards))\n\n with open(\"../data/day_04.in\") as input_file:\n draw, boards = parse_input(input_file)\n print(\"losing score:\", get_losing_score(draw, boards))\n","repo_name":"phoenix10k/advent-of-code","sub_path":"aoc-2021/python/src/day_04.py","file_name":"day_04.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35171109357","text":"from imgui_bundle import hello_imgui, immapp, imgui\n\nimport sys\nfrom typing import Optional\n\n\ntest_open_metrics: Optional[imgui.test_engine.Test] = None\ntest_capture: Optional[imgui.test_engine.Test] = None\ntest_exit: Optional[imgui.test_engine.Test] = None\n\n\ndef my_register_tests():\n global test_open_metrics, test_capture, test_exit\n engine = hello_imgui.get_imgui_test_engine()\n\n # Open Metrics window\n test_open_metrics = imgui.test_engine.register_test(\n engine, \"demo_tests\", \"open_metrics\"\n )\n\n def test_func_metrics(ctx: imgui.test_engine.TestContext):\n 
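# Sketch of a compact alternative (illustrative only, not the original
# solution's code) to Board.win in the bingo record above: zip(*grid)
# transposes rows to columns, so one expression covers both directions.
def board_wins(called_grid):
    # called_grid: list of rows of booleans (True = number was called)
    return (any(all(row) for row in called_grid)
            or any(all(col) for col in zip(*called_grid)))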
ctx.set_ref(\"Dear ImGui Demo\")\n ctx.menu_check(\"Tools/Metrics\\\\/Debugger\")\n\n test_open_metrics.test_func = test_func_metrics\n\n # Capture entire Dear ImGui Demo window.\n test_capture = imgui.test_engine.register_test(\n engine, \"demo_tests\", \"capture_screenshot\"\n )\n\n def test_func_capture(ctx: imgui.test_engine.TestContext):\n ctx.set_ref(\"Dear ImGui Demo\")\n ctx.item_open(\"Widgets\") # Open collapsing header\n ctx.item_open_all(\"Basic\") # Open tree node and all its descendants\n ctx.capture_screenshot_window(\"Dear ImGui Demo\")\n\n test_capture.test_func = test_func_capture\n\n # Exit\n test_exit = imgui.test_engine.register_test(engine, \"demo_tests\", \"exit\")\n\n def test_func_exit(ctx: imgui.test_engine.TestContext):\n ctx.item_click(\"**/Exit\")\n\n test_exit.test_func = test_func_exit\n\n\n@immapp.static(idx_frame_count=0)\ndef queue_all_tests():\n static = queue_all_tests\n static.idx_frame_count += 1\n\n if static.idx_frame_count == 3:\n engine = hello_imgui.get_imgui_test_engine()\n test_io = imgui.test_engine.get_io(engine)\n test_io.config_run_speed = imgui.test_engine.TestRunSpeed.normal\n\n imgui.test_engine.queue_test(engine, test_open_metrics)\n imgui.test_engine.queue_test(engine, test_capture)\n imgui.test_engine.queue_test(engine, test_exit)\n\n\ndef app_gui():\n imgui.text(\"Hello\")\n\n if imgui.button(\"Exit\"):\n hello_imgui.get_runner_params().app_shall_exit = True\n\n imgui.show_demo_window()\n imgui.test_engine.show_test_engine_windows(\n hello_imgui.get_imgui_test_engine(), True\n )\n\n queue_all_tests()\n\n\ndef main() -> int:\n print(\"Starting ci_automation_test_app\")\n try:\n runner_params = hello_imgui.RunnerParams()\n runner_params.use_imgui_test_engine = True\n\n runner_params.callbacks.show_gui = app_gui\n runner_params.callbacks.register_tests = my_register_tests\n immapp.run(runner_params)\n except Exception as e:\n print(\"ERROR: exception in ci_automation_test_app\", e)\n return 1\n\n print(\"Exiting ci_automation_test_app with success!\\n\")\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"pthom/imgui_bundle","sub_path":".github/ci_automation_tests/ci_automation_test_app_bundle.py","file_name":"ci_automation_test_app_bundle.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":394,"dataset":"github-code","pt":"81"} +{"seq_id":"6888121178","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 10 17:24:54 2021\n\n@author: bartm\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport sys\nimport math \nimport numpy as np\nimport pandas as pd\nimport wandb \nfrom torch._six import string_classes\nimport collections\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.cluster import KMeans\n\nclass gammaManager_Independant(nn.Module):\n def __init__(self):\n super(gammaManager_Independant, self).__init__()\n self.gammaParents = torch.tensor(0.0)\n self.gammaChildren = torch.tensor(0.0)\n self.timestep = torch.tensor(0.0)\n \n def composeLoss(self, Node):\n return Node.loss + self.gammaParents * Node.coupling_loss_Parents + self.gammaChildren * Node.coupling_loss_Children\n \n def updateGamma(self,Node):\n self.timestep +=1\n def reinitGamma(self, Node):\n self.gammaParents = torch.tensor(0.0).to(self.gammaParents.device)\n self.gammaChildren = torch.tensor(0.0).to(self.gammaChildren.device)\n def to_(self,device):\n self.gammaParents = self.gammaParents.to(device)\n self.gammaChildren = 
self.gammaChildren.to(device)\n self.timestep = self.timestep.to(device)\n \n \nclass gammaManager_Constant(nn.Module):\n def __init__(self, gammaParents,gammaChildren, startingTime=0.0):\n super(gammaManager_Constant, self).__init__()\n self.gammaParents_0 = torch.tensor(gammaParents)\n self.gammaChildren_0 = torch.tensor(gammaChildren)\n self.gammaParents = torch.tensor(0.0)\n self.gammaChildren = torch.tensor(0.0)\n self.startingTime = torch.tensor(startingTime)\n self.timestep = torch.tensor(0.0)\n \n def composeLoss(self, Node):\n return Node.loss + self.gammaParents * Node.coupling_loss_Parents + self.gammaChildren * Node.coupling_loss_Children\n \n def updateGamma(self, Node):\n if self.timestep < self.startingTime:\n self.gammaParents = torch.tensor(0.0).to(self.gammaParents.device)\n self.gammaChildren = torch.tensor(0.0).to(self.gammaParents.device)\n else:\n self.gammaParents = self.gammaParents_0\n self.gammaChildren = self.gammaChildren_0\n self.timestep+=1\n return self.gammaParents, self.gammaChildren\n # def reinitGamma(self, Node):\n # self.gammaParents = 0.0\n # self.gammaChildren = 0.0\n def to_(self, device):\n self.gammaParents_0 = self.gammaParents_0.to(device)\n self.gammaChildren_0 = self.gammaChildren_0.to(device)\n self.gammaParents = self.gammaParents.to(device)\n self.gammaChildren = self.gammaChildren.to(device)\n self.startingTime = self.startingTime.to(device)\n self.timestep = self.timestep.to(device)\n \nclass gammaManager_exponential(nn.Module):\n def __init__(self, startingTime, maxiter):\n super(gammaManager_exponential, self).__init__()\n self.gammaParents_0 = torch.tensor(0.0)\n self.gammaParents_1 = torch.tensor(0.0)\n self.timestep = torch.tensor(0.0)\n self.gammaChildren_0 = torch.tensor(0.0)\n self.gammaChildren_1 = torch.tensor(0.0)\n self.startingTime = startingTime\n self.maxiter = maxiter\n \n def composeLoss(self, Node):\n return Node.loss + self.gammaParents * Node.coupling_loss_Parents + self.gammaChildren * Node.coupling_loss_Children\n \n def updateGamma(self, Node):\n return\n \n def reinitGamma(self, Node):\n self.gammaParents_0 = Node.loss\n self.gammaChildren = torch.tensor(0.0)\n \n \n \n \n \nclass gammaManager_Linear(nn.Module):\n def __init__(self, startingTime, maxiter, finalsplit):\n super(gammaManager_Linear, self).__init__()\n self.gammaParents_0 = torch.tensor(0.0)\n self.gammaParents = torch.tensor(0.0)\n self.timestep = torch.tensor(0.0)\n self.gammaChildren_0 = torch.tensor(0.0)\n self.gammaChildren = torch.tensor(0.0)\n self.startingTime = torch.tensor(startingTime)\n self.maxiter = torch.tensor(maxiter)\n self.finalsplit = torch.tensor(finalsplit)\n \n def composeLoss(self, Node):\n return Node.loss + self.gammaParents * Node.coupling_loss_Parents + self.gammaChildren * Node.coupling_loss_Children\n \n def updateGamma(self, Node):\n if self.timestep < self.startingTime:\n self.gammaParents = torch.tensor(0.0)\n self.gammaChildren = torch.tensor(0.0)\n elif self.timestep == self.startingTime:\n self.reinitGamma(Node)\n else:\n self.gammaParents = self.gammaParents_0 * (torch.min(self.timestep,self.maxiter) - self.startingTime)\n self.gammaChildren = self.gammaChildren_0 * (torch.min(self.timestep,self.maxiter) - self.startingTime)\n self.timestep+=1\n return self.gammaParents, self.gammaChildren\n \n def reinitGamma(self, Node):\n \n if Node.isRoot:\n self.gammaChildren_0 = self.finalsplit* (Node.loss.clone().detach()/Node.coupling_loss_Children.clone().detach()) / (self.maxiter - self.startingTime)\n elif Node.isLeaf:\n 
self.gammaParents_0 =self.finalsplit* (Node.loss.clone().detach()/Node.coupling_loss_Parents.clone().detach()) / (self.maxiter - self.startingTime)\n else:\n self.gammaParents_0 =self.finalsplit* (Node.loss.clone().detach()/Node.coupling_loss_Parents.clone().detach()) / (self.maxiter - self.startingTime)\n self.gammaChildren_0 = self.finalsplit* (Node.loss.clone().detach()/Node.coupling_loss_Children.clone().detach()) / (self.maxiter - self.startingTime)\n \n def to_(self,device):\n self.gammaParents_0 = self.gammaParents_0.to(device)\n self.gammaParents = self.gammaParents.to(device)\n self.timestep = self.timestep.to(device)\n self.gammaChildren_0 = self.gammaChildren_0.to(device)\n self.gammaChildren = self.gammaChildren.to(device)\n self.startingTime = self.startingTime.to(device)\n self.maxiter = self.maxiter.to(device)\n self.finalsplit = self.finalsplit.to(device)\n\n \n \n \n \n \nclass Callback_SimpleLossSaver():\n def __init__(self):\n self.trainingloss = []\n self.testingloss = []\n self.validationloss = []\n \n def updatetrain(self,Node):\n self.trainingloss.append(Node.loss.item())\n def updatetest(self,Node):\n self.testingloss.append(Node.loss.item())\n def updateval(self,Node):\n self.validationloss.append(Node.loss.item())\n \nclass Callback_WandBSimpleLossSaver():\n def __init__(self, project):\n import wandb\n print(\"wandb imported\")\n wandb.login()\n print(\"wandb login\")\n wandb.init(project=project, entity=\"barthelemymp\")\n self.config_dict = {\n }\n \n self.trainingloss = []\n self.testingloss = []\n self.validationloss = []\n \n def pushConfig(self, config=None):\n if config==None:\n wandb.config.update(self.config_dict)\n else:\n self.config_dict = config\n wandb.config.update(self.config_dict) \n \n def updateConfig(self, key, value):\n self.config_dict[key] = value\n self.pushConfig()\n\n def updatetrain(self,Node, recursive=True):\n wandb.log({\"Train loss\"+Node.Name: Node.loss.item(), \n \"epoch\":Node.gammaManager.timestep, \n \"gamma parents\"+Node.Name:Node.gammaManager.gammaParents, \n \"gamma children\"+Node.Name:Node.gammaManager.gammaChildren,\n \"distance Loss\"+Node.Name:Node.coupling_loss_Parents\n })\n if Node.isLeaf==False:\n if recursive:\n for i in range(len(Node.children)):\n child = Node.children[i]\n self.updatetrain(child)\n\n def updatetest(self, Node, recursive=True):\n wandb.log({\"Test loss\"+Node.Name: Node.loss.item(), \"epoch\":Node.gammaManager.timestep})\n if Node.isLeaf==False:\n if recursive:\n for i in range(len(Node.children)):\n child = Node.children[i]\n self.updatetest(child)\n \n def updateval(self, Node, recursive=True):\n wandb.log({\"Val loss\"+Node.Name: Node.loss.item(), \"epoch\":Node.gammaManager.timestep})\n if Node.isLeaf==False:\n if recursive:\n for i in range(len(Node.children)):\n child = Node.children[i]\n self.updateval(child)\n \n def updatevalonChildren(self, Node, recursive=True):\n for i in range(len(Node.children)):\n loss = Node.LossFunction(Node.model, Node.children[i].batch)\n wandb.log({\"Val loss\"+Node.Name+\" on \"+Node.children[i].Name: Node.loss.item(), \"epoch\":Node.gammaManager.timestep})\n if recursive:\n for i in range(len(Node.children)):\n if self.children[i].isLeaf==False:\n self.updatevalonChildren(Node.children[i],recursive=True)\n \n \n# class gammaManager_Selflearning(nn.Module):\n# def __init__(self, startingTime, maxiter):\n# super(gammaManager_selflearning, self).__init__()\n \n# def composeLoss(self, Node):\n# return Node.loss + self.gammaParents * Node.coupling_loss_Parents + 
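# A standalone restatement (a sketch, not from the repo) of the schedule that
# gammaManager_Linear implements above: gamma stays at zero before
# startingTime, then grows linearly and saturates at maxiter.
def linear_gamma(t, gamma_0, starting_time, max_iter):
    if t < starting_time:
        return 0.0
    return gamma_0 * (min(t, max_iter) - starting_time)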
self.gammaChildren * Node.coupling_loss_Children\n \n# def updateGamma(self):\n# for center_parameters, replica_parameters in zip(self.model.parameters(), self.parent.model.parameters()):\n# self.coupling_loss_Parents += loss_fn_elastic(center_parameters, replica_parameters)\n# return self.gammaParents, self.gammaChildren\n# def reinitGamma(self, Node, finalsplit):\n\n \n\nclass PhyloNode():#nn.Module\n def __init__(self, \n model, \n optimizer, \n LossFunction, \n parent=None, \n children=[], \n dataset = None, \n tuplesize=1, \n batch_size=32, \n gammaManager = gammaManager_Independant(),\n # callback=Callback_SimpleLossSaver(), \n Name=\"Root\"\n ):\n # super(PhyloNode, self).__init__()\n self.model = model\n self.Name = Name\n self.optimizer = optimizer\n self.parent = parent\n self.children = children\n self.isLeaf = len(children)==0\n self.isRoot = parent==None\n self.dataset = dataset\n self.batch_size = batch_size\n if dataset is not None:\n if isinstance(dataset, list):\n self.train_set = dataset[0]\n self.test_set = dataset[1]\n self.val_set = dataset[2]\n else:\n trainL = int(0.8 * len(dataset))\n testL = int(0.1 * len(dataset))\n valL = len(dataset) - trainL -testL\n self.train_set, self.test_set, self.val_set = torch.utils.data.random_split(dataset, [trainL, testL, valL])\n self.train_iterator = iter(DataLoader(self.train_set, batch_size=batch_size, shuffle=True))\n self.test_iterator = iter(DataLoader(self.test_set, batch_size=batch_size, shuffle=True))\n self.val_iterator = iter(DataLoader(self.val_set, batch_size=batch_size, shuffle=True))\n self.gammaManager = gammaManager\n\n self.batch = None\n self.tuplesize = tuplesize\n self.LossFunction = LossFunction\n \n self.isAttractedBychildren = False\n self.isAttractedByParent = False\n \n self.coupling_loss_Parents = torch.tensor(0.0)\n self.coupling_loss_Children = torch.tensor(0.0)\n self.loss = torch.tensor(0.0)\n \n def kmeansSplit(self,K):\n assert self.dataset !=None\n kmeans = KMeans(n_clusters=K, random_state=0).fit(self.dataset.sequences)\n \n self.train_set = torch.utils.data.Subset(self.dataset, kmeans.labels_<=K-3)\n self.test_set = torch.utils.data.Subset(self.dataset, kmeans.labels_==K-2)\n self.val_set = torch.utils.data.Subset(self.dataset, kmeans.labels_==K-1)\n\n \n def getTrainLength(self): \n if self.isLeaf:\n return int(0.8 * len(self.dataset))#### error if kmeans TO DO\n else: \n l = 0\n for i in range(len(self.children)):\n l+=self.children[i].getTrainLength()\n return l\n \n def getTestLength(self): \n if self.isLeaf:\n return int(0.1 * len(self.dataset))\n else: \n l = 0\n for i in range(len(self.children)):\n l+=self.children[i].getTestLength()\n return l\n \n def getValLength(self): \n if self.isLeaf:\n trainL = int(0.8 * len(self.dataset))\n testL = int(0.1 * len(self.dataset))\n valL = len(self.dataset) - trainL -testL\n return valL\n else: \n l = 0\n for i in range(len(self.children)):\n l+=self.children[i].getValLength()\n return l\n \n \n\n def getNewTrainBatch(self, fullBatch=False):\n if self.isLeaf:\n if fullBatch == False:\n try:\n self.batch = next(self.train_iterator)\n except StopIteration:\n # StopIteration is thrown if dataset ends\n # reinitialize data loader \n self.train_iterator = iter(DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True))\n self.batch = next(self.train_iterator)\n else:\n self.batch = self.train_set[:]\n else:\n batchlist = []\n for j in range(self.tuplesize):\n batchlist.append([])\n for i in range(len(self.children)):\n child = self.children[i]\n 
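# The batch-cycling idiom used by getNewTrainBatch and friends, restated as a
# minimal sketch (the helper name and state dict are assumptions): rebuild the
# DataLoader iterator whenever it is exhausted and raises StopIteration.
from torch.utils.data import DataLoader

def next_batch(state, dataset, batch_size):
    try:
        return next(state['iterator'])
    except (KeyError, StopIteration):
        state['iterator'] = iter(DataLoader(dataset, batch_size=batch_size, shuffle=True))
        return next(state['iterator'])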
ba = child.getNewTrainBatch(fullBatch=fullBatch)\n for j in range(self.tuplesize):\n batchlist[j].append(ba[j])\n \n seqs = torch.cat(batchlist[0], dim=0)\n self.batch = (seqs,)\n for j in range(1,self.tuplesize):\n self.batch += (torch.cat(batchlist[j], dim=0),)\n # print(self.Name, self.batch[0].shape, self.batch[1].shape)\n return self.batch\n \n \n def getNewTestBatch(self, fullBatch=False):\n if self.isLeaf:\n if fullBatch == False:\n try:\n self.batch = next(self.test_iterator)\n except StopIteration:\n # StopIteration is thrown if dataset ends\n # reinitialize data loader \n self.test_iterator = iter(DataLoader(self.test_set, batch_size=self.batch_size, shuffle=True))\n self.batch = next(self.test_iterator)\n else:\n self.batch = self.test_set[:]\n else:\n batchlist = []\n for j in range(self.tuplesize):\n batchlist.append([])\n for i in range(len(self.children)):\n child = self.children[i]\n ba = child.getNewTestBatch(fullBatch=fullBatch)\n for j in range(self.tuplesize):\n batchlist[j].append(ba[j])\n \n seqs = torch.cat(batchlist[0], dim=0)\n self.batch = (seqs,)\n for j in range(1,self.tuplesize):\n self.batch += (torch.cat(batchlist[j], dim=0),)\n return self.batch\n \n def getNewValBatch(self, fullBatch=False):\n if self.isLeaf:\n if fullBatch == False:\n try:\n self.batch = next(self.val_iterator)\n except StopIteration:\n # StopIteration is thrown if dataset ends\n # reinitialize data loader \n self.val_iterator = iter(DataLoader(self.val_set, batch_size=self.batch_size, shuffle=True))\n self.batch = next(self.val_iterator)\n self.batch = next(self.val_iterator)\n else:\n self.batch = self.val_set[:]\n else:\n batchlist = []\n for j in range(self.tuplesize):\n batchlist.append([])\n for i in range(len(self.children)):\n child = self.children[i]\n ba = child.getNewValBatch(fullBatch=fullBatch)\n for j in range(self.tuplesize):\n batchlist[j].append(ba[j])\n \n seqs = torch.cat(batchlist[0], dim=0)\n self.batch = (seqs,)\n for j in range(1,self.tuplesize):\n self.batch += (torch.cat(batchlist[j], dim=0),)\n return self.batch\n \n def computeLoss(self, recursive=True):\n self.loss = self.LossFunction(self.model, self.batch)\n if recursive:\n for i in range(len(self.children)):\n self.children[i].computeLoss(recursive=True)\n \n def evalmode(self,recursive=True):\n self.model.eval()\n if recursive:\n for i in range(len(self.children)):\n self.children[i].evalmode(recursive=recursive)\n \n def trainmode(self,recursive=True):\n self.model.train()\n if recursive:\n for i in range(len(self.children)):\n self.children[i].trainmode(recursive=recursive)\n \n def zero_grad(self,recursive=True):\n self.optimizer.zero_grad()\n if recursive:\n for i in range(len(self.children)):\n self.children[i].optimizer.zero_grad()\n \n def set_isAttractedBychildren(self, newValue, recursive=True):\n \n if self.isLeaf:\n self.isAttractedBychildren = False\n else:\n self.isAttractedBychildren = newValue\n if recursive:\n for i in range(len(self.children)):\n self.children[i].set_isAttractedBychildren(newValue, recursive=True)\n\n def set_isAttractedByParent(self, newValue, recursive=True):\n \n if self.isRoot:\n self.isAttractedByParent = False\n else:\n self.isAttractedByParent = newValue\n if recursive:\n for i in range(len(self.children)):\n self.children[i].set_isAttractedByParent(newValue, recursive=True)\n \n def computeCouplingLossParent(self, recursive=True):\n loss_fn_elastic = torch.nn.MSELoss(reduction='sum')\n self.coupling_loss_Parents = torch.zeros_like(self.coupling_loss_Parents)\n if 
self.isRoot==False:\n for center_parameters, replica_parameters in zip(self.model.parameters(), self.parent.model.parameters()):\n self.coupling_loss_Parents += loss_fn_elastic(center_parameters, replica_parameters.clone().detach())\n if recursive:\n for i in range(len(self.children)):\n self.children[i].computeCouplingLossParent(recursive=True)\n return self.coupling_loss_Parents\n\n \n def computeCouplingLossChildren(self, recursive=True):\n \n loss_fn_elastic = torch.nn.MSELoss(reduction='sum')\n self.coupling_loss_Children = torch.zeros_like(self.coupling_loss_Children)\n if self.isLeaf==False:\n for i in range(len(self.children)):\n child = self.children[i]\n for center_parameters, replica_parameters in zip(self.model.parameters(), child.model.parameters()):\n self.coupling_loss_Children += loss_fn_elastic(center_parameters, replica_parameters.clone().detach())\n if recursive:\n for i in range(len(self.children)):\n self.children[i].computeCouplingLossChildren(recursive=True)\n return self.coupling_loss_Children\n \n def addChildren(self, child):\n child.isRoot = False\n self.isLeaf = False\n self.children.append(child)\n child.parent = self\n \n def addParent(self, parent):\n assert (self.isRoot),\"Already has a parent\"\n self.parent = parent\n self.isRoot =False\n parent.isLeaf = False\n parent.children.append(self)\n \n def trainingStep(self, recursive=True):\n # self.getnewTrainBatch()\n # self.trainmode(recursive=recursive)\n # self.computeLoss(recursive=recursive)\n # self.coupling_loss_Children(recursive=recursive)\n # self.coupling_loss_Parents(recursive=recursive)\n self.gammaManager.updateGamma(self)\n self.zero_grad(recursive=False)\n Totalloss = self.gammaManager.composeLoss(self)\n # print(\"totaloss\", Totalloss)\n Totalloss.backward()\n #torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)\n # print(self.Name)\n # self.optimizer.step()\n # self.callback.updatetrain(self)\n \n if recursive:\n for i in range(len(self.children)):\n self.children[i].trainingStep(recursive=recursive)\n def optimizerstep(self,recursive=True):\n self.optimizer.step()\n if recursive:\n for i in range(len(self.children)):\n self.children[i].optimizerstep(recursive=recursive)\n def to_(self, device, recursive=True):\n self.model = self.model.to(device)\n if self.dataset is not None:\n self.train_set.dataset.to_(device)\n self.test_set.dataset.to_(device)\n self.val_set.dataset.to_(device)\n self.train_iterator = iter(DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True))\n self.test_iterator = iter(DataLoader(self.test_set, batch_size=self.batch_size, shuffle=True))\n self.val_iterator = iter(DataLoader(self.val_set, batch_size=self.batch_size, shuffle=True))\n self.gammaManager.to_(device)\n self.coupling_loss_Parents = self.coupling_loss_Parents.to(device)\n self.coupling_loss_Children = self.coupling_loss_Children.to(device)\n self.loss = self.loss.to(device)\n if recursive:\n for i in range(len(self.children)):\n self.children[i].to_(device, recursive=recursive)\n \n \n # def testingStep(self, recursive=True):\n # # self.getnewTrainBatch()\n # # self.trainmode(recursive=recursive)\n # # self.computeLoss(recursive=recursive)\n # # self.coupling_loss_Children(recursive=recursive)\n # # self.coupling_loss_Parents(recursive=recursive)\n # # self.gammaManager.updateGamma()\n # # self.gammaManager.timestep += 1\n # # self.zero_grad(self,recursive=True)\n # # Totalloss = self.gammaManager.composeLoss(self)\n # # Totalloss.backward()\n # # 
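# Minimal sketch (assumed function name) of the elastic coupling computed by
# computeCouplingLossParent/Children above: summed squared differences between
# corresponding parameters of two models, detaching the neighbour's weights so
# gradients only flow into model_a.
import torch

def elastic_coupling(model_a, model_b):
    mse = torch.nn.MSELoss(reduction='sum')
    return sum(mse(p, q.clone().detach())
               for p, q in zip(model_a.parameters(), model_b.parameters()))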
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1) \n # # self.optimizer.step()\n # self.callback.updatetest(self)\n # if recursive:\n # for i in range(len(self.children)):\n # self.children[i].trainingStep(recursive=recursive)\n\n \n \n \n","repo_name":"barthelemymp/phyloreplica","sub_path":"src/PhyloTrees.py","file_name":"PhyloTrees.py","file_ext":"py","file_size_in_byte":23236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6354002844","text":"f = open(\"input.txt\")\n\nclass monkey:\n def __init__(self, index, items, v1, op, v2, test, true, false):\n self._index = index\n self._items = items\n self._v1 = v1\n self._op = op\n self._v2 = v2\n self._test = test\n self._true = true\n self._false = false\n \n self._score = 0\n \n @property\n def empty(self):\n return len(self._items) == 0\n \n def pop(self):\n if not self._items:\n return None\n \n level = self._items[0]\n del self._items[0]\n \n v1 = level\n if self._v1 != \"old\":\n v1 = int(self._v1)\n \n v2 = level\n if self._v2 != \"old\":\n v2 = int(self._v2)\n \n if self._op == \"+\":\n level = v1 + v2\n elif self._op == \"-\":\n level = v1 - v2\n elif self._op == \"*\":\n level = v1 * v2\n elif self._op == \"/\":\n level = v1 / v2\n \n level = level // 3\n \n if (level % self._test) == 0:\n new_monkey = self._true\n else:\n new_monkey = self._false\n \n self._score += 1\n \n return (level, new_monkey)\n \n \n def push(self, level):\n self._items.append(level)\n \n @property\n def score(self):\n return self._score\n \n def __repr__(self):\n return \"[%d]: score %d items %s\" % (self._index, self._score, str(self._items))\n \nmonkeys = []\n\nm = None\nindex = None\nitems = None\nv1 = None\nop = None\nv2 = None\ntest = None\ntrue = None\nfalse = None\n\nfor line in f:\n line = line.strip()\n print(line)\n \n if not line:\n print(\"m[%d] items=%s new = %s %s %s, (div by %d) ? %d : %d\" % (index, items, v1, op, v2, test, true, false))\n m = monkey(index, items, v1, op, v2, test, true, false)\n monkeys.append(m)\n index = None\n continue\n \n if line.startswith(\"Monkey \"):\n ignore, index = line.split()\n index = index[:-1]\n index = int(index)\n \n continue\n \n if line.startswith(\"Starting items: \"):\n items = [int(x) for x in line[16:].split(\", \")]\n continue\n \n if line.startswith(\"Operation: new = \"):\n v1, op, v2 = line[17:].split()\n continue\n \n \n if line.startswith(\"Test: divisible by \"):\n test = int(line[19:])\n continue\n \n if line.startswith(\"If true: throw to monkey \"):\n true = int(line[25:])\n continue\n \n if line.startswith(\"If false: throw to monkey \"):\n false = int(line[26:])\n continue\n\nif index is not None:\n print(\"m[%d] items=%s new = %s %s %s, (div by %d) ? 
%d : %d\" % (index, items, v1, op, v2, test, true, false))\n m = monkey(index, items, v1, op, v2, test, true, false)\n monkeys.append(m)\n\nfor r in range(20):\n for i in range(len(monkeys)):\n m = monkeys[i]\n \n while not m.empty:\n level, new_monkey = m.pop()\n \n monkeys[new_monkey].push(level)\n \n print(\"Round: %d\" % r)\n scores = []\n for i in range(len(monkeys)):\n print(monkeys[i])\n scores.append(monkeys[i].score)\n \n scores = sorted(scores)\n print(\"Result %d\" % (scores[-1] * scores[-2]))\n ","repo_name":"newdimm/adventofcode","sub_path":"2022/11/11_01.py","file_name":"11_01.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33604055146","text":"from tkinter import *\r\nfrom random_lines import text\r\nimport random\r\n\r\n\r\n\r\n\r\nrandom_texts=random.choice(text)\r\nprint(len(random_texts))\r\n\r\n\r\n\r\n\r\n\r\n# creating timer\r\ndef countdown():\r\n\r\n start_timer(60)\r\ndef start_timer(count):\r\n timer_left.config(text=f\":{count}\")\r\n if count > 0:\r\n window.after(1000,start_timer, count-1)\r\n elif count==0:\r\n total_words = len(typing_box.get(\"1.0\",END))\r\n WPM = total_words / 5\r\n WPM_count.config(text=f\"{WPM}\")\r\n\r\n# creating GUI\r\nwindow=Tk()\r\nwindow.title(\"The Typing speed testing machine\")\r\nwindow.config(width=800, height=500)\r\n\r\nbutton=Button(text=\"Restart\", highlightthickness=0, command=countdown)\r\nbutton.place(x=400, y=150)\r\nWPM_count=Label(text=\"00\")\r\nWPM_count.place(x=360, y=150)\r\n\r\n\r\ntimer_left=Label(text=f\":60\")\r\ntimer_left.place(x=220, y=150)\r\ntyping_box=Text(width=50, height=60)\r\ntyping_box.place(x=150, y= 270)\r\n\r\nstart_timer(60)\r\nWPM_label=Label(text=\"WPM\")\r\nWPM_label.place(x=320, y=150)\r\ntimer=Label(text=\"Time left\")\r\ntimer.place(x=150, y=150)\r\nlines_Label=Label(text=random_texts)\r\nlines_Label.place(x=10, y=200)\r\n\r\n\r\n\r\n\r\n\r\n\r\nwindow.mainloop()","repo_name":"Sandeepsandygetit/Personal-projects","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27889139389","text":"import csv\nfrom rdkit.Chem.Scaffolds.MurckoScaffold import MurckoScaffoldSmiles\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.DataStructs import TanimotoSimilarity\n\ndef read_pubchem_smiles(data_path: str):\n smiles_data = []\n with open(data_path) as f:\n csv_reader = csv.reader(f, delimiter=',')\n for idx, row in enumerate(csv_reader):\n smiles = row[-1]\n smiles_data.append(smiles)\n return smiles_data\n\n\ndef _generate_scaffold(smiles, include_chirality=False):\n mol = Chem.MolFromSmiles(smiles)\n scaffold = MurckoScaffoldSmiles(mol=mol, includeChirality=include_chirality)\n return scaffold\n\n\ndef generate_scaffolds(dataset, log_every_n=1000):\n scaffolds = {}\n data_len = len(dataset)\n print(f'Total Data Size {data_len}, about to generate scaffolds')\n for idx, smiles in enumerate(dataset):\n if idx % log_every_n == 0:\n print(\"Generating scaffold %d/%d\"%(idx, data_len))\n scaffold = _generate_scaffold(smiles)\n if scaffold not in scaffolds:\n scaffolds[scaffold] = [idx]\n else:\n scaffolds[scaffold].append(idx)\n\n scaffolds = {k: sorted(v) for k, v in scaffolds.items()}\n scaffold_sets = [(s, s_set) for (s, s_set) in sorted(scaffolds.items(),\n key=lambda x: (len(x[1]), x[1][0]), reverse=True)]\n return 
scaffold_sets\n\ndef molecular_distance(a: Chem.Mol, b:Chem.Mol, as_similarity=False):\n eps = 1e-4\n a, b = [AllChem.GetMorganFingerprint(Chem.AddHs(m), 2, useFeatures=True) for m in [a, b]]\n fp_similarity = TanimotoSimilarity(a, b)\n if as_similarity:\n return fp_similarity\n \n # distance = 1/(eps+fp_similarity)\n distance = 1- fp_similarity\n return distance","repo_name":"JongKook-Heo/DimensionalityReduction","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9675480175","text":"from tkinter import *\n\nroot = Tk()\nroot.title('Ventana de botones')\nroot.geometry(\"300x300\")\n\nlabel = Label(root,text=\"boton clickado\")\ndef click():\n ##instanciar Label dentro de la funcion crear una label por cada click, una solucion es instanciarla arriba\n ##(fuera de la funcion) y hacer el label.pack en la funcion, eso hace que la etiqueta solo aparezca una \n ##vez y no una etiqueta distinta cada vez que se da click en el boton\n #label = Label(root,text=\"boton clickado\")\n label.pack()\n\n#Argumentos de Button(dondeRenderizar, tituloBoton, funcionEjecutar, colorTexto,colorFondo)\n ##bg y fg acepta colores escritos red, yellow... y hexadecimal. ffff00 es amarillo, por eso tiene fondo amarillo.\nbtn = Button(root, text=\"clickMe\",command=click,fg=\"red\",bg=\"#ffff00\")\nbtn.pack()\n\nroot.mainloop()","repo_name":"andrac275/pythonStuff","sub_path":"19_Aplicaciones de escritorio con TkInter/01_buttons.py","file_name":"01_buttons.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74960362185","text":"__author__ = 'aymgal'\n\nfrom coolest.template.classes.lensing_entity import LensingEntity\nfrom coolest.template.classes.mass_light_model import MassModel\nfrom coolest.template.classes import util\n\n\nclass MassField(LensingEntity):\n \"\"\"Generic field of massive objects, for instance an external shear field.\n\n Parameters\n ----------\n name : str\n Name associated to that shear component.\n redshift : float\n Redshift associated to that shear component, if needed.\n mass_model : MassModel, optional\n Mass model of the field, by default None\n \"\"\"\n\n def __init__(self,\n name: str,\n redshift: float,\n mass_model: MassModel = None) -> None:\n super().__init__(name, redshift, mass_model=mass_model)\n ","repo_name":"aymgal/COOLEST","sub_path":"coolest/template/classes/mass_field.py","file_name":"mass_field.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"9371114748","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport glob\nimport functools\n\nimport tensorflow as tf \n\n\nNUMBER_TRAIN_DATA = 1464\nNUMBER_VAL_DATA = 1449\nNUMBER_TRAINVAL_DATA = 2913\nINPUT_SIZE = [384, 384]\n\n\ndef decode(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string, default_value=''),\n 'image/filename': tf.FixedLenFeature([], tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature([], tf.string, default_value=''),\n 'image/height': tf.FixedLenFeature([], tf.int64, default_value=0),\n 'image/width': tf.FixedLenFeature([], tf.int64, default_value=0),\n 'image/channel': 
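# Hedged illustration of the fingerprint distance in molecular_distance above
# (the function name here is an assumption): Tanimoto similarity between
# Morgan fingerprints, converted to a distance as 1 - similarity.
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.DataStructs import TanimotoSimilarity

def tanimoto_distance(smiles_a, smiles_b):
    fps = [AllChem.GetMorganFingerprint(Chem.AddHs(Chem.MolFromSmiles(s)), 2,
                                        useFeatures=True)
           for s in (smiles_a, smiles_b)]
    return 1 - TanimotoSimilarity(*fps)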
tf.FixedLenFeature([], tf.int64, default_value=3),\n 'label/encoded': tf.FixedLenFeature([], tf.string, default_value=''),\n 'label/format': tf.FixedLenFeature([], tf.string, default_value=''),\n }\n )\n\n image = tf.image.decode_jpeg(features['image/encoded'], channels=3)\n label = tf.image.decode_png(features['label/encoded'], channels=1)\n filename = features['image/filename']\n height = features['image/height']\n width = features['image/width']\n\n input_dict = {\n 'image': image,\n 'label': label,\n 'filename': filename,\n 'height': height,\n 'width': width,\n }\n\n return input_dict\n\n\ndef shift_image(image, label, width_shift_range, height_shift_range):\n \"\"\"This fn will perform the horizontal or vertical shift\"\"\"\n if width_shift_range or height_shift_range:\n if width_shift_range:\n width_shift_range = tf.random_uniform(\n [],\n -width_shift_range * INPUT_SIZE[1],\n width_shift_range * INPUT_SIZE[1])\n if height_shift_range:\n height_shift_range = tf.random_uniform(\n [],\n -height_shift_range * INPUT_SIZE[0],\n height_shift_range * INPUT_SIZE[0])\n # Translate both \n image = tf.contrib.image.translate(\n image, [width_shift_range, height_shift_range])\n label = tf.contrib.image.translate(\n label, [width_shift_range, height_shift_range])\n\n return image, label\n\n\ndef flip_image(horizontal_flip, image, label):\n if horizontal_flip:\n flip_prob = tf.random_uniform([], 0.0, 1.0)\n image, label = tf.cond(\n tf.less(flip_prob, 0.5),\n lambda: (tf.image.flip_left_right(image),\n tf.image.flip_left_right(label)),\n lambda: (image, label))\n\n return image, label\n\n\ndef normalize(image):\n \"\"\"Normalize image to [-1, 1]\"\"\"\n image = (2.0 / 255.0) * tf.to_float(image) - 1.0\n\n return image\n\n\ndef augment(input_dict,\n is_vis,\n resize=None, \n hue_delta=0, \n horizontal_flip=False,\n width_shift_range=0,\n height_shift_range=0):\n \"\"\"Data augmentation and preprocessing.\n\n Args:\n input_dict: input_dict.\n is_vis: boolean indicating whether in visualization mode.\n resize: Resize the image to some size e.g. 
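# Sketch of the configuration pattern used further below (train_config /
# val_config / vis_config): functools.partial freezes keyword arguments of
# the shared augment() function, yielding one preprocessing callable per
# dataset split. Names in this snippet are illustrative.
import functools

def augment_demo(input_dict, is_vis, resize=None, hue_delta=0):
    return (input_dict, is_vis, resize, hue_delta)

train_fn = functools.partial(augment_demo, is_vis=False,
                             resize=[384, 384], hue_delta=0.1)
# train_fn({'image': ...}) applies the frozen training settings.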
[512, 512]\n hue_delta: Adjust the hue of an RGB image by random factor\n horizontal_flip: Random left right flip,\n width_shift_range: Randomly translate the image horizontally\n height_shift_range: Randomly translate the image vertically \n\n Returns:\n input_dict.\n \"\"\"\n image = input_dict['image']\n label = input_dict['label']\n\n if is_vis:\n input_dict['original_image'] = image\n\n if resize is not None:\n image = tf.image.resize_images(\n image, resize, align_corners=True, \n method=tf.image.ResizeMethod.BILINEAR)\n label = tf.image.resize_images(\n label, resize, align_corners=True, \n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n if hue_delta:\n image = tf.image.random_hue(image, hue_delta)\n \n image, label = flip_image(horizontal_flip, image, label)\n image, label = shift_image(\n image, label, width_shift_range, height_shift_range)\n\n image = normalize(image)\n\n input_dict['image'] = image\n input_dict['label'] = label\n \n return input_dict\n\n\ntrain_config = {\n 'is_vis': False,\n 'resize': [INPUT_SIZE[0], INPUT_SIZE[1]],\n 'hue_delta': 0.1,\n 'horizontal_flip': True,\n 'width_shift_range': 0.1,\n 'height_shift_range': 0.1\n}\ntrain_preprocessing_fn = functools.partial(augment, **train_config)\n\nval_config = {\n 'is_vis': False,\n 'resize': [INPUT_SIZE[0], INPUT_SIZE[1]],\n}\nval_preprocessing_fn = functools.partial(augment, **val_config)\n\nvis_config = { \n 'is_vis': True,\n 'resize': [INPUT_SIZE[0], INPUT_SIZE[1]],\n}\nvis_preprocessing_fn = functools.partial(augment, **vis_config)\n\n\ndef inputs(tfrecord_folder, dataset_split, is_training, is_vis,\n batch_size, num_epochs=None):\n filename = os.path.join(tfrecord_folder, dataset_split + '.tfrecord')\n\n with tf.name_scope('input'):\n dataset = tf.data.TFRecordDataset(filename)\n\n dataset = dataset.map(decode)\n\n if is_training:\n dataset = dataset.map(train_preprocessing_fn)\n min_queue_examples = int(NUMBER_TRAIN_DATA * 0.2)\n dataset = dataset.shuffle(\n buffer_size=min_queue_examples + 3 * batch_size)\n elif is_vis:\n dataset = dataset.map(vis_preprocessing_fn)\n else:\n dataset = dataset.map(val_preprocessing_fn)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size=batch_size)\n\n iterator = dataset.make_one_shot_iterator()\n\n return iterator.get_next()\n","repo_name":"hhwxxx/fcn_tensorflow","sub_path":"input_pipeline.py","file_name":"input_pipeline.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7224206917","text":"import re\nfrom typing import Collection\nimport numpy as np\n\ndef prepare(col):\n \"\"\"\n This function converts a panda series into a new one with the data converted into more \"programming-friendly\" strings in order to ease data analysis.\n Args:\n col(pandas series): panda data series that we want to convert.\n return:\n list of elements converted into lower case, no spaces no the sides and replaces the spaces in between the words with a low bar(_).\n \"\"\" \n return [i.lower().strip().replace(\" \",\"_\") for i in list(col)]\n\n\n\ndef strdata(col):\n \"\"\"\n This function casts data to string in order to ease its analysis.\n Args:\n col(pandas series): panda data series that we want to convert.\n return:\n list of elements casted into string.\n \"\"\"\n return [str(i) for i in col]\n\n\ndef unify(data,findthis,replaceforthis):\n \"\"\"\n It finds a chosen word contained in a string and replaces it for 
another one, making it easier to unify categories.\n    Args:\n        data (pandas series): pandas Series (or DataFrame column) previously cast to string.\n        findthis (str): word to look for in each element; every element that contains this word is replaced.\n        replaceforthis (str): word to use as the replacement.\n    return:\n        list of elements where every element containing findthis has been replaced with replaceforthis.\n    """\n    new = []\n    for i in list(data):\n        if findthis in i:\n            new.append(str(replaceforthis))\n        else:\n            new.append(i)\n    return new\n\ndef fatal(col):\n    """\n    Normalises the fatal flag: "y" becomes "yes", "n" becomes "no" and any other value becomes NaN.\n    Meant to be applied element-wise, e.g. with Series.apply.\n    Args:\n        col (str): single value from a pandas Series previously cast to string.\n    return:\n        "yes", "no" or NaN.\n    """\n    if col == "y":\n        return "yes"\n    elif col == "n":\n        return "no"\n    return np.nan\n\ndef act(col):\n    """\n    Replaces the activities of minor importance with the string "other_activities".\n    Meant to be applied element-wise, e.g. with Series.apply.\n    Args:\n        col (str): single value from a pandas Series previously cast to string.\n    return:\n        one of the relevant categories, None for missing values, or "other_activities".\n    """\n    if col in ("swimming", "surfing", "fishing", "boat"):\n        return col\n    elif col is None:\n        return None\n    return "other_activities"\n\ndef getmonth(col):\n    """\n    Looks for a month abbreviation inside a date string and returns the full month name.\n    Meant to be applied element-wise, e.g. with Series.apply.\n    Args:\n        col (str): single date string from a pandas Series previously cast to string.\n    return:\n        full month name, or NaN when no month abbreviation is found.\n    """\n    months = {"jan": "january", "feb": "february", "mar": "march", "apr": "april",\n              "may": "may", "jun": "june", "jul": "july", "aug": "august",\n              "sep": "september", "oct": "october", "nov": "november", "dec": "december"}\n    for abbr, name in months.items():\n        if abbr in col:\n            return name\n    return np.nan\n\ndef australia(col):\n    """\n    Keeps the string "australia" and converts every other value to NaN.\n    Meant to be applied element-wise, e.g. with Series.apply.\n    Args:\n        col (str): single value from a pandas Series previously cast to string.\n    return:\n        "australia" or NaN.\n    """\n    if col == "australia":\n        return "australia"\n    return np.nan\n
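\n# Hedged usage sketch: the cleaners above receive one value at a time, so in\n# practice they are applied with Series.apply. The DataFrame and its column\n# names below are hypothetical placeholders, not part of the original module.\nif __name__ == "__main__":\n    import pandas as pd\n    demo = pd.DataFrame({"fatal_(y/n)": ["y", "n", "UNKNOWN"],\n                         "activity": ["surfing", "paddling", None]})\n    demo["fatal_(y/n)"] = demo["fatal_(y/n)"].apply(fatal)  # -> yes / no / NaN\n    demo["activity"] = demo["activity"].apply(act)  # -> surfing / other_activities / None\n    print(demo)\n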
\ndef season(col):\n    """\n    Maps a full month name to the corresponding season in the southern hemisphere; any other value becomes NaN.\n    Meant to be applied element-wise, e.g. with Series.apply.\n    Args:\n        col (str): single month name from a pandas Series previously cast to string.\n    return:\n        name of the corresponding season in the southern hemisphere, or NaN.\n    """\n    seasons = {"december": "summer", "january": "summer", "february": "summer",\n               "march": "autumn", "april": "autumn", "may": "autumn",\n               "june": "winter", "july": "winter", "august": "winter",\n               "september": "spring", "october": "spring", "november": "spring"}\n    return seasons.get(col, np.nan)\n\ndef actnan(col):\n    """\n    Replaces the activities of minor importance with NaN.\n    Meant to be applied element-wise, e.g. with Series.apply.\n    Args:\n        col (str): single value from a pandas Series previously cast to string.\n    return:\n        one of the relevant categories, or NaN.\n    """\n    if col in ("swimming", "surfing", "fishing", "boat"):\n        return col\n    return np.nan","repo_name":"Andrestart/-data_cleaning-Andres_Perez","sub_path":"src/cleaning_functions.py","file_name":"cleaning_functions.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"37139758866","text":"T = int(input())\nfor tc in range(T):\n    n = int(input())\n    result = []\n    final = []\n    nasa = list(map(int, input().split()))\n    for i in range(0, n*2, 2):\n        result.append([nasa[i], nasa[i+1]])\n    final.append(result.pop(0))\n    loc = 0\n    while result:\n        if result[loc][1] == final[0][0]:\n            final.insert(0, result.pop(loc))\n            loc = 0\n        elif result[loc][0] == final[len(final) -1][1]:\n            final.append(result.pop(loc))\n            loc = 0\n        else:\n            loc += 1\n    print("#%d" %(tc + 1), end=" ")\n    for i in range(len(final)):\n        print(final[i][0], end=" ")\n        print(final[i][1], end=" ")\n    print()\n","repo_name":"wony5248/Daily_Study","sub_path":"daily_PS_SWEA/SWEA 금속막대 - DP.py","file_name":"SWEA 금속막대 - DP.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"18204181837","text":"import sys\nimport heapq\n\ninput = sys.stdin.readline\nT = int(input())\n\nfor _ in range(T):\n    k = int(input())\n    visited = [False] * 1_000_000\n    min_Q = []\n    max_Q = []\n    \n    for i in range(k):\n        operation = list(input().split())\n        \n        if operation[0] == 'I':\n            value = int(operation[-1])\n            heapq.heappush(min_Q, (value, i))\n            heapq.heappush(max_Q, (-1 * value, i))\n            visited[i] = True\n        elif operation[-1] == '1':\n            while max_Q and not visited[max_Q[0][1]]:\n                heapq.heappop(max_Q)\n            if max_Q:\n                visited[max_Q[0][1]] = False\n                heapq.heappop(max_Q)\n        else:\n            while min_Q and not visited[min_Q[0][1]]:\n                heapq.heappop(min_Q)\n            if min_Q:\n                visited[min_Q[0][1]] = False\n                heapq.heappop(min_Q)\n    \n    while max_Q and not visited[max_Q[0][1]]:\n        heapq.heappop(max_Q)\n    while min_Q and not visited[min_Q[0][1]]:\n        heapq.heappop(min_Q)\n\n    if min_Q and max_Q:\n        print(f'{-1 * max_Q[0][0]} {min_Q[0][0]}')\n    else:\n        
print('EMPTY')","repo_name":"heejun32/Algorithm","sub_path":"BaekJoon/7662_again.py","file_name":"7662_again.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10912581671","text":"import dash\nfrom dash.dependencies import Event, Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport flask\nimport os\nimport uuid\nfrom dash import DashResponse\nfrom declarations import User\nimport plotly.graph_objs as go\nfrom sqlalchemy import create_engine, exists\nfrom sqlalchemy.orm import relationship, sessionmaker\nimport declarations as dc\nfrom flask import Flask, request, render_template, flash, abort, session, redirect\nfrom flask import make_response\nfrom sqlalchemy.ext.declarative import declarative_base\n\nserver = flask.Flask(__name__)\napp = dash.Dash(__name__, server=server)\n\nusers = dict()\n\ndef get_session():\n engine = create_engine(os.environ['SQLALCHEMY_DATABASE_URI'])\n engine.connect()\n Session = sessionmaker(bind=engine)\n return Session()\n\ndef layout(id):\n # Edit this object!\n return html.Div(\n [\n html.Div(\n [\n id\n\n ],\n className='six columns',\n style={'margin-top': '10'}\n ),\n\n ],\n className='row')\n\n# Barebones layout\napp.layout = html.Div([\n dcc.Interval(id='refresh', interval=2000),\n# Edit this object!\n html.Div(\n [\n html.Div(\n [\n html.Div(\n [\n html.Div([\n html.Div([], id='start_content', className=\"container\")\n ], id='content', className=\"container\"),\n html.Div(id='graph', className=\"container\")\n\n ],\n className='twelve columns'\n ),\n\n ],\n className='row'\n ),\n html.Div(\n [\n html.Div([], id='open_div', className=\"container\"),\n html.Div([], id='close_div', className=\"container\"),\n html.Button('Tuleb', id='yes', className='button-primary four columns'),\n html.Div([], className='four columns'),\n html.Button('Ei Tule', id='no', className='button-primary four columns'),\n ],\n className='eight columns'\n ),\n ],\n\n className='six columns offset-by-three'\n )\n\n])\n\ndef get_wait_for_next(curr_state, id):\n return 'Please wait for start voting'\n\n\ndef get_voted_graph(curr_state, id):\n session = get_session()\n votes_yes = session.query(dc.Votes).filter(dc.Votes.state == curr_state.state).filter(\n dc.Votes.vote == 'yes').all()\n votes_no = session.query(dc.Votes).filter(dc.Votes.state == curr_state.state). 
\\\n filter(dc.Votes.vote == 'no').all()\n\n labels = ['Tuleb', 'Ei tule']\n values = [len(votes_yes), len(votes_no)]\n colors = ['#96D38C', '#E1396C']\n\n trace = go.Pie(labels=labels, values=values, marker=dict(colors=colors,\n line=dict(color='#000000', width=2)))\n\n figure = {\n 'data': [trace]\n }\n\n return dcc.Graph(id='main_graph',\n figure=figure,\n style={\n 'height': 400\n },\n config={\n 'displayModeBar': False,\n })\n\n# Update the `content` div with the `layout` object.\n# When you save this file, `debug=True` will re-run\n# this script, serving the new layout\n@app.callback(\n Output('content', 'children'),\n events=[Event('refresh', 'interval')])\ndef display_layout():\n session = get_session()\n\n if session.query(dc.Current_State).all():\n curr_state = session.query(dc.Current_State).all()[0]\n else:\n curr_state = None\n\n id = flask.request.cookies.get('watcher_id')\n\n if curr_state and curr_state.opened == 1:\n voted = session.query(dc.Votes).filter(dc.Votes.uuid == id).filter(dc.Votes.state == curr_state.state).all()\n if len(voted) == 0 and session.query(dc.State).filter(dc.State.id==curr_state.state).all():\n state = session.query(dc.State).filter(dc.State.id==curr_state.state).all()[0]\n\n response = DashResponse(html.Div(layout(state.text)))\n else:\n response = DashResponse(get_voted_graph(curr_state, id))\n\n else:\n response = DashResponse(html.Div(get_wait_for_next(curr_state, id)))\n if id is None:\n response.set_cookie('watcher_id', str(uuid.uuid4()))\n return response\n\n@app.callback(\n Output('open_div', 'children'),\n [Input('yes', 'n_clicks')])\ndef yes(n_click):\n session = get_session()\n if n_click is not None:\n if session.query(dc.Current_State).all():\n curr_state = session.query(dc.Current_State).all()[0]\n else:\n curr_state = None\n id = flask.request.cookies.get('watcher_id')\n\n if curr_state and curr_state.opened == 1:\n voted = session.query(dc.Votes).filter(dc.Votes.uuid == id).filter(dc.Votes.state == curr_state.state).all()\n if len(voted) == 0:\n vote = dc.Votes(uuid=id, state=curr_state.state, vote='yes')\n session.add(vote)\n session.commit()\n session.close()\n\n\n@app.callback(\n Output('close_div', 'children'),\n [Input('no', 'n_clicks')])\ndef no(n_click):\n session = get_session()\n if n_click is not None:\n if session.query(dc.Current_State).all():\n curr_state = session.query(dc.Current_State).all()[0]\n else:\n curr_state = None\n\n id = flask.request.cookies.get('watcher_id')\n\n if curr_state and curr_state.opened == 1:\n voted = session.query(dc.Votes).filter(dc.Votes.uuid == id).filter(dc.Votes.state == curr_state.state).all()\n if len(voted) == 0:\n vote = dc.Votes(uuid=id, state=curr_state.state, vote='no')\n session.add(vote)\n session.commit()\n session.close()\n\napp.css.append_css({\"external_url\": \"https://codepen.io/chriddyp/pen/bWLwgP.css\"})\n\nstatic_route = '/static/<path:path>'\n\ndef save_user(username, email):\n #Session = sessionmaker(bind=dc.engine)\n user_id = str(uuid.uuid4())\n users[user_id] = User(name=username, email=email)\n #session = Session()\n #session.add(user_added)\n #session.commit()\n #session.close()\n return user_id\n\n@server.route('/register_vote')\ndef vote():\n username = flask.request.args.get('email')\n email = flask.request.args.get('name')\n user_added = save_user(username, email)\n response = make_response(redirect('/start'))\n response.set_cookie('watcher_id', str.encode(user_added))\n return response\n\n\n@server.route('/start')\ndef index():\n resp = 
flask.make_response(flask.render_template('index.html'))\n    #resp.set_cookie('watcher_id', str(uuid.uuid4()))\n    return resp\n\n@server.route(static_route)\ndef serve_static(path):\n    root_dir = os.getcwd()\n    return flask.send_from_directory(\n        os.path.join(root_dir, 'static'), path\n)\nif __name__ == '__main__':\n    app.run_server(port=8888)\n","repo_name":"alextavgen/vote_system","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"41263001544","text":"import numpy as np\nfrom mantid.simpleapi import Fit, mtd, PDFFourierTransform,\\n    Rebin, AppendSpectra, LoadNexus, ExtractSingleSpectrum,\\n    CloneWorkspace, DeleteWorkspace, GroupWorkspaces\n\n#################################\n##   Customize these variables ##\n#################################\nslices_workspace_name = "slices_QE"\nfirst_slice = 0  # first_slice doesn't have to be smaller than last_slice\nlast_slice = 115\nout_root = "fit"  # root-name of output workspaces containing fit-related output\nout_sqe = "sqe_no_background"  # name of output workspace containing slices without the background, S(Q,E)\nout_gre = "gre"  # output workspace containing the Fourier transform of out_sqe using PDFFourierTransform, G(r,E)\ngre_options = {"DeltaR": 0.01,  # Options for algorithm PDFFourierTransform\n               "Rmax": 20.0}\n# Here paste the QuadXGaus+LB model you copied from the model builder\nmodel_string = """(composite=ProductFunction,NumDeriv=false;name=Quadratic,\nA0=0.0771474,A1=-0.0184599,A2=0.00192572;name=Gaussian,Height=1,PeakCentre=4.06054,\nSigma=2.91412,ties=(Height=1));name=LinearBackground,A0=-0.0362006,A1=0.00913319"""\n\n############################################\n##   Unnecessary to change the code below ##\n############################################\n\n\ndef get_boundaries(workspace, index):\n    """\n    Return the non-zero intensity range of a slice.\n    :param workspace: workspace containing the slices\n    :param index: workspace index for the slice of interest\n    :return: starting and ending Q values having non-zero intensity\n    """\n    y = workspace.dataY(index)  # intensities\n    nonzero_indices = np.where(y > 0)[0]\n    q = workspace.dataX(index)  # Q-values\n    return q[nonzero_indices[0]], q[nonzero_indices[-1]]\n\n\ndef templatize(model):\n    """\n    Substitute specific parameter values with appropriate keywords.\n    NOTICE: currently only works for the built-in model QuadXGaus+LB\n    :param model: model string with specific parameter values\n    :return: templatized string\n    """\n    return """\n    (composite=ProductFunction,NumDeriv=false;\n    name=Quadratic,A0={{f0.f0.A0}},A1={{f0.f0.A1}},A2={{f0.f0.A2}};\n    name=Gaussian,Height=1,PeakCentre={{f0.f1.PeakCentre}},Sigma={{f0.f1.Sigma}},ties=(Height=1)\n    );\n    name=LinearBackground,A0={{f1.A0}},A1={{f1.A1}}\n"""\n\n\ndef update_model(model, table):\n    """\n    Update the model string with the latest optimized parameter values.\n    :param model: string describing the fit model\n    :param table: table workspace containing the latest optimized values\n    :return: model string with updated parameter values\n    """\n    new_model = templatize(model)\n    for index in range(table.rowCount()-1):  # the last row holds the cost function value\n        row = table.row(index)\n        new_model = new_model.replace('{{'+row["Name"]+'}}', str(row["Value"]))\n    return new_model\n
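\n# Hedged illustration of the templatize()/update_model() round trip, kept\n# commented out so the script's behaviour is unchanged. The fake parameter\n# table is a hypothetical stand-in that only mimics the two methods\n# update_model() actually uses (rowCount and row); its last row plays the\n# role of Mantid's cost-function entry, which the loop deliberately skips.\n#\n#   class _FakeParameterTable:\n#       _rows = [{"Name": "f0.f0.A0", "Value": 0.0771474},\n#                {"Name": "Cost function value", "Value": 1.0}]\n#       def rowCount(self):\n#           return len(self._rows)\n#       def row(self, index):\n#           return self._rows[index]\n#\n#   # '{{f0.f0.A0}}' in the template becomes '0.0771474':\n#   print(update_model(model_string, _FakeParameterTable()))\n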
\ndef append_spectrum(single, composite):\n    """\n    Append a single spectrum to a matrix workspace.\n    AppendSpectra requires both workspaces to share the same binning, so both\n    are first rebinned onto a common grid spanning the widest Q-range with the\n    finest bin width.\n    :param single: workspace containing the single new spectrum\n    :param composite: matrix workspace containing the processed spectra\n    """\n    # Find binning triad for the single and composite workspaces\n    qs = single.dataX(0)\n    qsm, dqs, qsM = qs[0], (qs[-1]-qs[0])/(len(qs)-1), qs[-1]\n    qc = composite.dataX(0)\n    qcm, dqc, qcM = qc[0], (qc[-1]-qc[0])/(len(qc)-1), qc[-1]\n    # Find the biggest range and the finest binning\n    qmin = qsm if qsm < qcm else qcm\n    dq = dqs if dqs < dqc else dqc\n    qmax = qsM if qsM > qcM else qcM\n    # Rebin when necessary\n    delete_single = False\n    if [qsm, dqs, qsM] != [qmin, dq, qmax]:\n        delete_single = True\n        single = Rebin(single, [qmin, dq, qmax])\n    if [qcm, dqc, qcM] != [qmin, dq, qmax]:\n        composite = Rebin(composite, [qmin, dq, qmax], OutputWorkspace=composite.name())\n    composite = AppendSpectra(composite, single, OutputWorkspace=composite.name())\n    if delete_single:\n        DeleteWorkspace(single)\n    return composite\n\nsqe = None\ngre = None\nworkspaces_to_group = list()\nslices_workspace = mtd[slices_workspace_name]\njump = 1 if first_slice < last_slice else -1\nid_slice = first_slice\nwhile id_slice != last_slice:\n    start, end = get_boundaries(slices_workspace, id_slice)\n    """\n    Algorithm Fit creates the following workspaces\n    out_root_Parameters: optimal values of the fitting parameters\n    out_root_Workspace: slice, model-fit, residuals, and model-components\n    out_root_NormalisedCovarianceMatrix: correlations among the fitting parameters\n    """\n    Fit(Function=model_string,\n        InputWorkspace=slices_workspace_name,\n        WorkspaceIndex=id_slice,\n        StartX=start,\n        EndX=end,\n        Output="{0}{1}".format(out_root,id_slice),\n        CreateOutput=True)\n    for key in ("Workspace", "Parameters", "NormalisedCovarianceMatrix"):\n        workspaces_to_group.append("{0}{1}_{2}".format(out_root, id_slice, key))\n    # Update the model string. 
We take the current optimized model\n # as the initial guess for fitting of the next slice.\n parameters_workspace = mtd[\"{0}{1}_Parameters\".format(out_root, id_slice)]\n model_string = update_model(model_string, parameters_workspace)\n # Calculate the PDF of the residuals, which is the slice without\n # the model background\n fits_workspace = mtd[\"{0}{1}_Workspace\".format(out_root, id_slice)]\n residuals = ExtractSingleSpectrum(fits_workspace, 2)\n sqe = CloneWorkspace(residuals, OutputWorkspace=out_sqe)\\\n if not sqe else append_spectrum(residuals, sqe)\n single_gre = PDFFourierTransform(InputWorkspace=residuals,\n InputSofQType=\"S(Q)-1\",\n Qmin=start,\n Qmax=end,\n PDFType=\"G(r)\",\n DeltaR=gre_options[\"DeltaR\"],\n Rmax=gre_options[\"Rmax\"])\n gre = CloneWorkspace(single_gre, OutputWorkspace=out_gre)\\\n if not gre else append_spectrum(single_gre, gre)\n id_slice += jump\n\n# Group all the fit workspaces\nGroupWorkspaces(InputWorkspaces=\",\".join(workspaces_to_group), OutputWorkspace=\"{0}s\".format(out_root))\n\n# Clean up\nDeleteWorkspace(residuals)\nDeleteWorkspace(single_gre)","repo_name":"mantidproject/documents","sub_path":"Help/DynamicPDF/Tutorials/BackgroundRemover/supporting/sequentialFit.py","file_name":"sequentialFit.py","file_ext":"py","file_size_in_byte":6230,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"16035883043","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# *****************************************************\n#\n# file: yunnan.py\n# author: zoulingwei@zuoshouyisheng.com\n# date: 2020-12-21\n# brief: \n#\n# cmd>e.g: \n# *****************************************************\n\nimport time\nimport json\nimport requests\nimport random\nfrom urllib import request\n\nclass Request():\n '''Restful'''\n def __init__(self,url):\n self.url = url\n\n def request(self, data):\n '''post请求接口'''\n jdata = json.dumps(data).encode(encoding='utf-8')\n # print jdata\n headers = {'Content-Type': 'application/json'} # 指明用json方式发送数据\n req = request.Request(self.url, headers=headers,data=jdata)\n response = request.urlopen(req)\n res = response.read()\n js_out = json.loads(res)\n return js_out\n\n def request2(self, data):\n res = requests.post(self.url, json=data) # post请求用data参数\n # print(key)\n js_res = json.loads(res.text, encoding='utf8')\n return js_res\n\n\n\nrq = Request(url='https://www.12309.gov.cn/getFileListByPage')\n\nsf = open('zousi20211014.txt','w')\n\nfor page in range(1,800):\n\n data = {\"codeId\":\"\",\"page\":page,\"size\":15,\"fileType\":\"重要案件信息\",\"channelWebPath\":\"/gj/yn\",\"channelLevels\":\"\"}\n\n time.sleep(2+3*random.random())\n try:\n out = rq.request2(data)\n results = out.get('results',{})\n hits = results.get('hits',{})\n hits1 = hits.get('hits',[])\n print(page,len(hits1))\n for _dic in hits1:\n title = _dic.get('title','')\n # print(title)\n publishTime = _dic.get('publishedTimeStr','')\n subTitle = _dic.get('subTitle','')\n memo = _dic.get('memo','')\n content = _dic.get('content','')\n out_str = '。'.join([title,subTitle,memo,content])\n if '走私' in out_str:\n print(page)\n print(title)\n sf.write(str(page)+'\\t'+publishTime+'\\t'+out_str+'\\n')\n except:\n continue\nsf.close()\n\n\n\n","repo_name":"zoulala/Spiders","sub_path":"law12309/yunnan.py","file_name":"yunnan.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"70720849226","text":"import PySimpleGUI as 
sg\nimport os\nimport webbrowser\n\nfrom PySimpleGUI.PySimpleGUI import P\nfrom main import runCode\n\ndirname = os.path.dirname(__file__)\nicon_tantantAn = os.path.join(dirname, \"Icon.ico\")\n\ndef mensagem_erro(mensagem):\n sg.Popup(mensagem, title='Erro', modal=True, grab_anywhere=True, keep_on_top=True)\n\ndef pergunta_salvar(nome_ficheiro, PseudoPy):\n Guardar_pergunta_layout = [[sg.Text(\"Gostaria de salvar o programa?\")],\n [sg.Column([[sg.Button(\"Sim\"), sg.Button(\"Não\")]], justification='center')]]\n Guardar_pergunta = sg.Window(\"Guardar?\", Guardar_pergunta_layout, font=(\"Arial\", 12), use_default_focus=0 , no_titlebar=True, modal=True, grab_anywhere=True, keep_on_top=True)\n while True:\n event, values = Guardar_pergunta.read()\n if event == \"Sim\":\n salvar(nome_ficheiro, PseudoPy)\n break\n if event == \"Não\":\n apagar_vazio(nome_ficheiro, ficheiro_path=False)\n break\n Guardar_pergunta.close()\n\ndef abrir_ficheiro(main_window):\n main_window.close()\n escolha_ficheiro_layout=[[sg.Text(\"Escolha o Ficheiro: \"), sg.Input(readonly=(True), change_submits=True, disabled_readonly_background_color=\"#363636\"), sg.FileBrowse(key='File', button_text='Procurar')],\n [sg.Button(\"Voltar\"), sg.Button('Abrir')] ]\n escolha_ficheiro = sg.Window(\"Escolha de Ficheiro\", escolha_ficheiro_layout , finalize=True ,use_default_focus=0, modal=False , no_titlebar=True, grab_anywhere=True, keep_on_top=True)\n while True:\n event, values = escolha_ficheiro.read()\n if event == \"Voltar\":\n break\n if event == \"Abrir\":\n ficheiro_path = str(values['File'])\n if os.path.exists(ficheiro_path):\n if values:\n #lista_fp = ficheiro_path.split('/')\n #nome_ficheiro = lista_fp[-1]\n nome_ficheiro = os.path.basename(ficheiro_path)\n escolha_ficheiro.close()\n PseudoPy_f(nome_ficheiro, main_window, ficheiro_path)\n return nome_ficheiro and ficheiro_path\n else:\n mensagem = \"O ficheiro escolhido não existe\"\n mensagem_erro(mensagem)\n escolha_ficheiro.close()\n if event == 'Voltar':\n mainwindow()\n\ndef criarficheiro_f(main_window):\n main_window.close()\n nome_ficheiro_layout=[[sg.Text('Nome do arquivo a ser criado:'),sg.InputText()],\n [sg.Button(\"Ok\", bind_return_key=True),sg.Button(\"Voltar\")]]\n criar_ficheiro = sg.Window(\"Criar Ficheiro\", nome_ficheiro_layout, no_titlebar=True,grab_anywhere=True, keep_on_top=True)\n while True:\n event, values = criar_ficheiro.read()\n if event == \"Voltar\":\n break\n if event == \"Ok\":\n if values[0] != '':\n criar_ficheiro.disappear()\n nome_ficheiro = values[0] + \".txt\"\n if not os.path.isfile(nome_ficheiro):\n arquivo = open(nome_ficheiro,\"w\")\n arquivo.close()\n PseudoPy_f(nome_ficheiro, main_window, ficheiro_path=False)\n return nome_ficheiro\n else:\n criar_ficheiro.reappear()\n mensagem = \"Já exite um ficheiro com esse nome\"\n mensagem_erro(mensagem)\n else:\n mensagem = \"Não se pode criar um ficheiro sem nome\"\n mensagem_erro(mensagem)\n criar_ficheiro.close()\n if event == 'Voltar':\n mainwindow()\n\ndef comparar_texto(nome_ficheiro, PseudoPy):\n ficheiro_aberto = open(nome_ficheiro,'rt', encoding='utf-8')\n velho_texto = ficheiro_aberto.readlines()\n novo_texto = []\n for letras in velho_texto:\n nova_letra = letras. 
replace('\\n', '')\n novo_texto.append(nova_letra)\n line_texto = PseudoPy['-MLINE-'].get()\n line_texto = line_texto.splitlines()\n if novo_texto == line_texto:\n salvo = True\n else:\n salvo = False\n return salvo\n\ndef texto_ficheiro(ficheiro_path, PseudoPy):\n ficheiro_aberto = open(ficheiro_path,'rt', encoding='utf-8')\n texto = ficheiro_aberto.readlines()\n for i in texto:\n if i[-1] == '\\n':\n i = i. replace('\\n', '')\n PseudoPy['-MLINE-'].print(i)\n ficheiro_aberto.close()\n\ndef apagar_vazio(nome_ficheiro, ficheiro_path):\n if not ficheiro_path:\n tamanho = os.path.getsize(nome_ficheiro)\n if tamanho == 0:\n os.remove(nome_ficheiro)\n if ficheiro_path:\n tamanho = os. path. getsize(ficheiro_path)\n if tamanho == 0:\n os.remove(ficheiro_path)\n\ndef PseudoPy_f(nome_ficheiro, main_window, ficheiro_path):\n main_window.close()\n menu_def = [['Ficheiro', ['Correr','Guardar', 'Voltar', 'Sair']],\n ['Ajuda', ['Sobre...']]]\n \n PseudoPy_layout = [[sg.MenuBar(menu_def, font=(\"Arial\", 10), background_color='white', text_color='black')],\n #[sg.Column([[sg.Button('Correr', border_width=0, button_color=\"#333333\")]], justification = 'left')],\n [sg.Multiline(size=(800, 600), font=(\"Arial\", 12), background_color='#222021', text_color='white', key='-MLINE-')]]\n \n PseudoPy = sg.Window((\"PseudoPy: \" + nome_ficheiro) , PseudoPy_layout, default_element_size=(12, 1), auto_size_text=False, auto_size_buttons=False, \n default_button_element_size=(12, 1), grab_anywhere=True ,finalize = True, resizable=True, margins=(0, 0), icon= icon_tantantAn, use_default_focus=0, enable_close_attempted_event=True)\n PseudoPy['-MLINE-'].Widget.config(wrap='none') \n PseudoPy['-MLINE-'].Widget.config(insertbackground='white')\n PseudoPy.Maximize()\n if ficheiro_path:\n texto_ficheiro(ficheiro_path, PseudoPy)\n while True:\n event, values = PseudoPy.read(close=False)\n if event == sg.WINDOW_CLOSE_ATTEMPTED_EVENT or event == 'Sair':\n if ficheiro_path:\n salvo = comparar_texto(ficheiro_path, PseudoPy)\n if not salvo:\n pergunta_salvar(ficheiro_path, PseudoPy)\n else:\n salvo = comparar_texto(nome_ficheiro, PseudoPy)\n if not salvo:\n pergunta_salvar(nome_ficheiro, PseudoPy)\n #if os.path.isfile(nome_ficheiro):\n #apagar_vazio(nome_ficheiro, ficheiro_path)\n break\n if event == 'Correr':\n from ficheiro import loadLines\n if ficheiro_path:\n salvar(ficheiro_path, PseudoPy)\n lines = loadLines(ficheiro_path)\n else:\n salvar(nome_ficheiro, PseudoPy)\n lines = loadLines(nome_ficheiro)\n runCode(lines)\n if event == 'Guardar':\n if ficheiro_path:\n salvar(ficheiro_path, PseudoPy)\n else:\n salvar(nome_ficheiro, PseudoPy)\n if event == 'Sobre...':\n webbrowser.open(\"http://didas72.hopto.org/pseudo_python_PT.html\", new=1)\n #if event == 'Apagar':\n #if ficheiro_path:\n #os.remove(ficheiro_path)\n #else:\n #os.remove(nome_ficheiro)\n if event == 'Voltar':\n if ficheiro_path:\n pergunta_salvar(ficheiro_path, PseudoPy)\n else:\n pergunta_salvar(nome_ficheiro, PseudoPy)\n break\n PseudoPy.close()\n if event == 'Voltar':\n mainwindow()\n\ndef salvar(nome_ficheiro, PseudoPy):\n save_file = open(nome_ficheiro, 'wt', encoding = 'UTF-8')\n save_file.write(PseudoPy['-MLINE-'].get())\n save_file.close()\n\ndef mainwindow():\n global icon_tantantAn\n dirname = os.path.dirname(__file__)\n Logo_tantanTAN = os.path.join(dirname, \"Logo.png\")\n Logo = sg.Image(filename=Logo_tantanTAN)\n main_window_layout = [ [sg.Column([[Logo]], justification='center')],\n [sg.Column([[sg.Text('PseudoPython', font= (\"Arial\", 30))]], 
justification='center')],\n [sg.Column([[sg.Button('Abrir Ficheiro', font = (\"Arial\", 13)), sg.Button('Criar Ficheiro', font = (\"Arial\", 13))]], justification='center')]]\n\n main_window = sg.Window('PseudoPython', main_window_layout, use_default_focus=0, grab_anywhere=True, icon= icon_tantantAn)\n while True:\n event, values = main_window.read()\n if event == sg.WIN_CLOSED:\n break\n if event == 'Abrir Ficheiro':\n abrir_ficheiro(main_window)\n if event == 'Criar Ficheiro':\n criarficheiro_f(main_window)\n main_window.close()\n\nsg.theme_input_background_color('#363636')\nsg.theme_input_text_color('white')\nsg.theme_background_color(\"#333333\")\nsg.theme_button_color(\"#363636\")\nsg.theme_element_background_color(\"#333333\")\nsg.theme_element_text_color(\"white\")\nsg.theme_text_color(\"white\")\nsg.theme_text_element_background_color(\"#333333\")\n\na = True\nwhile a:\n a = False\n mainwindow()","repo_name":"didas72/PseudoPython","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":9047,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28299262260","text":"from math import sqrt\n\n\ndef ans9(n: int, x: float):\n '''\n n - степень\n x - само число\n '''\n\n def chisl(x: float, n: int):\n return (x* (n**x - 1))\n\n def znam(x: float, n: int):\n return (x - (2**n))\n\n some_n = 1\n f = 1\n\n while some_n != n:\n a1 = ((chisl(x=x, n=some_n))/(znam(x=x, n=some_n)))\n a2 = ((chisl(x=x, n=some_n+1))/(znam(x=x, n=some_n+1)))\n f = a1 * a2\n some_n += 1\n\n return f\n\n\ndef ans12(k: int, n: int):\n\n mysum = 0\n some_n = 1\n\n def some_sqrt(kn, nn):\n return sqrt(kn* (nn - 1))\n\n while some_n != n:\n a1 = some_sqrt(kn=k, nn=some_n)\n a2 = some_sqrt(kn=k+1, nn=some_n+1)\n mysum += a1 + a2\n some_n += 1\n\n return mysum\n\n\ndef ans3(n: int, x: float):\n '''\n x >= 3\n n >= 3\n '''\n def fac(n):\n factorial = 1\n while n > 1:\n factorial *= n\n n -= 1\n return factorial\n\n def chisl(xx, nn):\n return fac(x**(2*n-1))\n\n def znam(nn):\n return ((2 * nn) - 1)\n\n some_n = 1\n f = x\n\n while some_n != n:\n f -= (chisl(x, n))/(znam(n))\n some_n += 1\n\n return f\n\n\ndef ans16(n):\n def fac(n):\n factorial = 1\n while n > 1:\n factorial *= n\n n -= 1\n return factorial\n p = 0\n for i in range(2, 2 * n + 1, 2):\n p += fac(i)\n print(p)\n\n return y\n\n\ndef abonent(room):\n '''Написать программу, которая по заданному номеру квартиры абонента кабельной сети выводит на экран номер подъезда, в котором находится абонент. Дом состоит из 4 подъездов. В первом подъезде абонентами являются квартиры: 3 – 7, 13, 15; во втором: 19 – 20, 27, 28; в третьем: 33, 35, 37 – 40; в четвертом: 51.'''\n '''\n 4 подъезда\n 1: 3-7, 13, 15\n 2: 19 - 20, 27, 28\n 3: 33, 35, 37, 37 - 40\n 4: 51\n '''\n\n p1 = [3, 4, 5, 6, 7, 13, 15]\n p2 = [19, 20, 27, 28]\n p3 = [33, 35, 37, 38, 39, 40]\n p4 = [51]\n\n if room in p1:\n return 'Подъезд 1'\n elif room in p2:\n return 'Подъезд 2'\n elif room in p3:\n return 'Подъезд 3'\n elif room in p4:\n return 'Подъезд 4'\n\n else:\n return 'Не в этих квартирах'\n","repo_name":"pavelepanov/homeworks_lyceum","sub_path":"december_11/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13036183337","text":"from .. utils.code import isCodeValid, getSyntaxError, containsStarImport\nfrom . compile_scripts import compileScript\nfrom .. 
problems import ExecutionUnitNotSetup\nfrom . code_generator import getSocketValueExpression, iterSetupCodeLines, getInitialVariables\n\nuserCodeStartComment = \"# User Code\"\n\nclass ScriptExecutionUnit:\n def __init__(self, network, nodeByID):\n self.network = network\n self.setupScript = \"\"\n self.setupCodeObject = None\n self.executionData = {}\n\n self.scriptUpdated(nodeByID)\n\n def scriptUpdated(self, nodeByID = None):\n self.generateScript(nodeByID)\n self.compileScript()\n self.execute = self.raiseNotSetupException\n\n def setup(self):\n self.executionData = {}\n exec(self.setupCodeObject, self.executionData, self.executionData)\n self.execute = self.executionData[\"main\"]\n\n def insertSubprogramFunctions(self, data):\n self.executionData.update(data)\n\n def finish(self):\n self.executionData.clear()\n self.execute = self.raiseNotSetupException\n\n def getCodes(self):\n return [self.setupScript]\n\n\n def generateScript(self, nodeByID):\n node = self.network.getScriptNode(nodeByID)\n userCode = node.executionCode\n\n variables = getInitialVariables([node])\n\n finalCode = []\n finalCode.extend(iterSetupCodeLines([node], variables))\n finalCode.append(self.getFunctionHeader(node))\n\n if containsStarImport(userCode):\n finalCode.append(\" {}.errorMessage = 'Star import is not allowed'\".\n format(node.identifier))\n elif isCodeValid(userCode):\n finalCode.extend(indent(self.getFunctionBodyLines(node, userCode)))\n\n # used to show the correct line numbers to the user\n lineNumber = findFirstLineIndexWithContent(finalCode, userCodeStartComment) + 1\n finalCode.append(\"USER_CODE_START_LINE = {}\".format(lineNumber))\n else:\n error = getSyntaxError(userCode)\n finalCode.append(\" {}.errorMessage = 'Line: {} - Invalid Syntax'\".\n format(node.identifier, error.lineno))\n finalCode.append(\" \" + self.getDefaultReturnStatement(node))\n\n self.setupScript = \"\\n\".join(finalCode)\n\n def getFunctionBodyLines(self, node, userCode):\n lines = []\n lines.append(\"\\n\")\n lines.append(userCodeStartComment)\n lines.extend(userCode.split(\"\\n\"))\n lines.append(\"\\n\")\n if node.initializeMissingOutputs:\n lines.extend(self.iterInitializeMissingOutputsLines(node))\n if node.correctOutputTypes:\n lines.extend(self.iterTypeCorrectionLines(node))\n lines.append(self.getReturnStatement(node))\n\n if node.debugMode:\n return list(self.iterDebugModeFunctionBody(lines, node))\n else:\n return lines\n\n def iterDebugModeFunctionBody(self, lines, node):\n yield \"try:\"\n yield \" {}.errorMessage = ''\".format(node.identifier)\n yield from indent(lines)\n yield \"except Exception as e:\"\n yield \" __exceptionType, __exception, __tb = sys.exc_info()\"\n yield \" __lineNumber = __tb.tb_lineno - USER_CODE_START_LINE\"\n yield \" {}.errorMessage = 'Line: {{}} - {{}} ({{}})'.format(__lineNumber, __exception, type(e).__name__)\".format(node.identifier)\n yield \" \" + self.getDefaultReturnStatement(node)\n\n def getFunctionHeader(self, node):\n inputNames = [socket.text for socket in node.inputs[:-1]]\n parameterList = \", \".join(inputNames)\n header = \"def main({}):\".format(parameterList)\n return header\n\n def iterInitializeMissingOutputsLines(self, node):\n yield \"\"\n yield \"# initialize missing outputs\"\n yield \"localVariables = locals()\"\n for i, socket in enumerate(node.outputs[:-1]):\n variableName = socket.text\n yield \"__socket = {}.outputs[{}]\".format(node.identifier, i)\n yield \"__socket['variableInitialized'] = {} in localVariables\".format(repr(variableName))\n yield 
\"if not __socket['variableInitialized']:\"\n yield \" {} = __socket.getDefaultValue()\".format(variableName)\n\n def iterTypeCorrectionLines(self, node):\n yield \"\"\n yield \"# correct output types\"\n for i, socket in enumerate(node.outputs[:-1]):\n variableName = socket.text\n yield \"__socket = {}.outputs[{}]\".format(node.identifier, i)\n yield \"{0}, __socket['correctionType'] = __socket.correctValue({0})\".format(variableName)\n\n def getReturnStatement(self, node):\n outputNames = [socket.text for socket in node.outputs[:-1]]\n returnList = \", \".join(outputNames)\n return \"return \" + returnList\n\n def getDefaultReturnStatement(self, node):\n outputSockets = node.outputs[:-1]\n outputExpressions = [getSocketValueExpression(socket, node) for socket in outputSockets]\n return \"return \" + \", \".join(outputExpressions)\n\n def compileScript(self):\n self.setupCodeObject = compileScript(self.setupScript, name = \"script: {}\".format(repr(self.network.name)))\n\n def raiseNotSetupException(self):\n raise ExecutionUnitNotSetup()\n\ndef indent(lines, amount = 1):\n return (\" \" * (4 * amount) + line for line in lines) # returns a generator\n\ndef findFirstLineIndexWithContent(lines, content):\n for i, line in enumerate(lines, start = 1):\n if content in line: return i\n return 0\n","repo_name":"JacquesLucke/animation_nodes","sub_path":"animation_nodes/execution/script_execution_unit.py","file_name":"script_execution_unit.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":2231,"dataset":"github-code","pt":"81"} +{"seq_id":"3799495455","text":"# import findspark\n# findspark.init()\nfrom __future__ import print_function\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.sql import SparkSession, SQLContext, Row\nfrom pyspark.sql.functions import col\n\nfrom pyspark.ml.feature import Tokenizer, StopWordsRemover, CountVectorizer, StringIndexer, NGram\nfrom pyspark.ml import Pipeline, PipelineModel\n\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.classification import NaiveBayes\n\nimport datetime\n\n\nspark = SparkSession.builder.appName(\"SpamClassifier\").getOrCreate()\n#rw = spark.read.option(\"delimiter\", \"\\t\").csv(\"/home/pes1ug19cs341/Desktop/BigData/Project/train.csv\").toDF('Subject', 'Message', 'Spam/ham')\n#rw.show(5)\nrw = spark.read.format(\"csv\").option(\"header\", \"true\").load(\"/home/pes1ug19cs341/Desktop/BigData/Project/train.csv\")\nrw.show(5)\n\n\n# DATA PROCESSING\ntknizer = Tokenizer().setInputCol('Message').setOutputCol('words')\nstopwrds = StopWordsRemover().getStopWords() + ['-']\nrem = StopWordsRemover().setStopWords(stopwrds).setInputCol('words').setOutputCol('filtered')\nbigram = NGram().setN(2).setInputCol('filtered').setOutputCol('bigrams')\n\ncvmodel = CountVectorizer().setInputCol('filtered').setOutputCol('features')\ncvmodel_ngram = CountVectorizer().setInputCol('bigrams').setOutputCol('features')\n\nind = StringIndexer().setInputCol('Spam/Ham').setOutputCol('label')\n\npp = Pipeline(stages = [tknizer, rem, bigram, cvmodel, ind])\npreprocessed = pp.fit(rw)\npreprocessed.transform(rw).show(5)\n\n\n\nmdlnb = NaiveBayes(smoothing = 1)\npipeline = Pipeline(stages = [tknizer, rem, cvmodel, ind, mdlnb])\nmdl = pipeline.fit(\"train.csv\")\npredctn = mdl.transform(\"test.csv\")\npredctn.select('message', 'label', 'rawPrediction', 'probability', 'prediction').show(5)\n\neval = 
BinaryClassificationEvaluator().setLabelCol('label').setRawPredictionCol('prediction').setMetricName('areaUnderROC')\nac = eval.evaluate(predctn)\nprint(\"AUC: \", ac)\n\nmdl.save('/home/pes1ug19cs341/Desktop/BigData/Project/')\n\nif __name__ == \"__main__\":\n\n sc = SparkContext(appName = \"SpamClassifier\")\n ssc = StreamingContext(sc, 60)\n\n curr = datetime.datetime.now()\n fp = \"/home/pes1ug19cs341/Desktop/BigData/Project/\" + curr.strftime(\"%Y-%m-%d/\")\n #print(fp)\n lines = ssc.textFileStream(fp)\n\n def process(t, rdd):\n if rdd.isEmpty():\n print(\"Input is Empty\")\n return\n\n spark = SparkSession.builder.getOrCreate()\n rdd1 = rdd.map(lambda x : Row(message = x))\n df = spark.createDataFrame(rdd1)\n print(df.show())\n\n if not rdd.isEmpty():\n modl = PipelineModel.load('/home/pes1ug19cs341/Desktop/BigData/Project/mdl')\n\n pred = modl.transform(df)\n print(pred.show())\n\n \n\n lines.pprint()\n lines.foreachRDD(process)\n\n ssc.start()\n ssc.awaitTermination()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"pranav6226/BD_116_341_374_386","sub_path":"sphm.py","file_name":"sphm.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44202856899","text":"import numpy as np\nfrom scipy.optimize import minimize\nfrom scipy.stats import poisson\n\n\ndef goal_expectancy(\n home: float, draw: float, away: float, dc_adj: bool = True, rho: float = 0.001\n) -> dict:\n \"\"\"\n Estimates the home and away team's goal expectancy based on the\n home, draw, away probabilities\n\n Parameters\n -----------\n home : float\n Probability of home win\n draw : float\n Probability of draw\n away : float\n Probability of away win\n dc_adj : bool\n Whether to apply the Dixon and Coles adjustment\n rho : float\n The value for rho within the Dixon and Coles adjustment\n\n Returns\n ----------\n Dictionary containing home team's goal expectancy, away team's goal expectancy,\n the mean squared error between actual probabilities and estimated probabilities,\n and whether the minimizer returned as successful or not\n \"\"\"\n # set up the basic options for the solver so we give up\n # after 1000 attempts and don't log to screen\n options = {\n \"maxiter\": 1000,\n \"disp\": False,\n }\n\n res = minimize(\n fun=_mse,\n x0=[0.5, -0.5],\n args=(home, draw, away, dc_adj, rho),\n options=options,\n )\n\n output = {\n \"home_exp\": np.exp(res[\"x\"][0]),\n \"away_exp\": np.exp(res[\"x\"][1]),\n \"error\": res[\"fun\"],\n \"success\": res[\"success\"],\n }\n\n return output\n\n\ndef _mse(\n params: list,\n home: float,\n draw: float,\n away: float,\n dc_adj: bool = True,\n rho: float = 0.001,\n):\n \"\"\"\n Loss function used internally by the `goal_expectancy function` to\n calculate the mean squared error of the estimate\n \"\"\"\n exp_params = np.exp(params)\n\n mu1 = poisson(exp_params[0]).pmf(np.arange(0, 15))\n mu2 = poisson(exp_params[1]).pmf(np.arange(0, 15))\n\n mat = np.outer(mu1, mu2)\n\n if dc_adj:\n # apply Dixon and Coles adjustment\n mat[0, 0] *= 1 - exp_params[0] * exp_params[1] * rho\n mat[0, 1] *= 1 + exp_params[0] * rho\n mat[1, 0] *= 1 + exp_params[1] * rho\n mat[1, 1] *= 1 - rho\n\n pred = np.array(\n [\n np.sum(np.tril(mat, -1)), # home\n np.sum(np.diag(mat)), # draw\n np.sum(np.triu(mat, 1)),\n ]\n ) # away\n\n obs = np.array([home, draw, away])\n\n mse = np.mean((pred - obs) ** 2)\n\n return 
mse\n","repo_name":"martineastwood/penaltyblog","sub_path":"penaltyblog/models/goal_expectancy.py","file_name":"goal_expectancy.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"81"} +{"seq_id":"13086341078","text":"import asyncio\nimport websockets\nimport json\nfrom datetime import datetime\nfrom decimal import Decimal\nimport pprint\nimport psycopg2\nfrom psycopg2 import Error\n\n\"\"\" Abbreviations for trade data:\n \"e\": \"trade\", // Event type\n \"E\": 123456789, // Event time\n \"s\": \"BNBBTC\", // Symbol\n \"t\": 12345, // Trade ID\n \"p\": \"0.001\", // Price\n \"q\": \"100\", // Quantity\n \"b\": 88, // Buyer order ID\n \"a\": 50, // Seller order ID\n \"T\": 123456785, // Trade time\n \"m\": true, // Is the buyer the market maker?\n \"M\": true // Ignore\n \"\"\"\n\n\nasync def handle_data(websocket):\n data = await websocket.recv()\n data = await parse_data(data)\n pp = pprint.PrettyPrinter(indent=4)\n await insert_data(data)\n pp.pprint(data)\n\nasync def parse_data(data):\n data = json.loads(data)\n data['E'] = datetime.fromtimestamp(data['E']/1000) # event time\n data['T'] = datetime.fromtimestamp(data['T']/1000) # trade time\n data['p'] = Decimal(data['p']) # price\n data['q'] = Decimal(data['q']) # quantity\n return data\n\nasync def insert_data(data):\n # Connect to an existing database\n try:\n connection = psycopg2.connect(user=\"postgres\",\n password=\"\",\n host=\"localhost\",\n port=\"5432\",\n database=\"binance\")\n cursor = connection.cursor()\n\n postgres_insert_query = \"\"\" INSERT INTO trades \n (symbol, price, quantity, datetime) \n VALUES (%s,%s,%s,%s);\"\"\"\n\n record_to_insert = (data['s'], data['p'],data['q'],data['T']) \n cursor.execute(postgres_insert_query, record_to_insert)\n connection.commit()\n\n except (Exception, psycopg2.Error) as error :\n if(connection):\n print(\"Failed to insert record into mobile table\", error)\n\n finally:\n #closing database connection.\n if (connection):\n cursor.close()\n connection.close() \n\n\nasync def hello():\n url = \"wss://stream.binance.com:9443/ws/btcusdt@trade\"\n async with websockets.connect(url) as websocket:\n while True:\n await asyncio.create_task(handle_data(websocket))\n\nasyncio.get_event_loop().run_until_complete(hello())","repo_name":"RikUM/binance-py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37865719930","text":"from flask import Flask, request, jsonify, json\r\nfrom flask_api import status\r\nfrom jinja2 import Environment, FileSystemLoader\r\nfrom datetime import datetime,timedelta,date\r\nimport pymysql\r\nfrom flask_cors import CORS, cross_origin\r\nfrom flask import Blueprint\r\nfrom flask_restplus import Api, Resource, fields\r\nfrom werkzeug.utils import cached_property\r\nfrom werkzeug.datastructures import FileStorage\r\nimport requests\r\nimport random\r\nimport json\r\nimport string\r\nimport smtplib\r\nimport imghdr\r\nimport io\r\nimport re\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.image import MIMEImage\r\nfrom email.mime.text import MIMEText\r\n\r\napp = Flask(__name__)\r\ncors = CORS(app)\r\n\r\necommerce_organisation_otp = Blueprint('ecommerce_organisation_otp_api', __name__)\r\napi = Api(ecommerce_organisation_otp, title='Ecommerce API',description='Ecommerce API')\r\nname_space = 
api.namespace('EcommerceOrganisationOtp',description='Ecommerce Organisation Otp')\r\n\r\napp.config['CORS_HEADERS'] = 'Content-Type'\r\n\r\nEMAIL_ADDRESS = 'communications@creamsonservices.com'\r\nEMAIL_PASSWORD = 'CReam7789%$intELLi'\r\n#----------------------------------------------------#\r\n'''def mysql_connection():\r\n\tconnection = pymysql.connect(host='creamsonservices.com',\r\n\t user='creamson_langlab',\r\n\t password='Langlab@123',\r\n\t db='creamson_ecommerce',\r\n\t charset='utf8mb4',\r\n\t cursorclass=pymysql.cursors.DictCursor)\r\n\treturn connection\r\n\r\ndef mysql_connection_analytics():\r\n\tconnection_analytics = pymysql.connect(host='creamsonservices.com',\r\n\t user='creamson_langlab',\r\n\t password='Langlab@123',\r\n\t db='ecommerce_analytics',\r\n\t charset='utf8mb4',\r\n\t cursorclass=pymysql.cursors.DictCursor)\r\n\treturn connection_analytics'''\r\n\r\ndef mysql_connection():\r\n\tconnection = pymysql.connect(host='ecommerce.cdcuaa7mp0jm.us-east-2.rds.amazonaws.com',\r\n\t user='admin',\r\n\t password='oxjkW0NuDtjKfEm5WZuP',\r\n\t db='ecommerce',\r\n\t charset='utf8mb4',\r\n\t cursorclass=pymysql.cursors.DictCursor)\r\n\treturn connection\r\n\r\ndef ecommerce_analytics():\r\n\tconnection = pymysql.connect(host='ecommerce.cdcuaa7mp0jm.us-east-2.rds.amazonaws.com',\r\n\t user='admin',\r\n\t password='oxjkW0NuDtjKfEm5WZuP',\r\n\t db='ecommerce_analytics',\r\n\t charset='utf8mb4',\r\n\t cursorclass=pymysql.cursors.DictCursor)\r\n\treturn connection\r\n\r\n#-----------------------------------------------------#\r\n\r\necommerce_otp = api.model('ecommerce_otp',{\r\n\t\"USER_ID\":fields.Integer(),\r\n\t\"organisation_id\":fields.Integer(),\r\n\t\"role_id\":fields.Integer(),\r\n\t\"FIRST_NAME\":fields.String(),\r\n\t\"LAST_NAME\":fields.String(),\r\n\t\"MAIL_ID\":fields.String(),\r\n\t\"Address\":fields.String(),\r\n\t\"PHONE_NUMBER\":fields.String()\r\n\t})\r\n\r\n#------------------------------------------------------#\r\n@name_space.route(\"/GenerateOTP\")\r\nclass GenerateOTP(Resource):\r\n\t@api.expect(ecommerce_otp)\r\n\tdef post(self):\r\n\t\tconnection = mysql_connection()\r\n\t\tcursor = connection.cursor()\r\n\t\tdetails = request.get_json()\r\n\t\t\r\n\t\tUSER_ID = details.get('USER_ID')\r\n\t\torganisation_id = details['organisation_id']\r\n\t\trole_id = details['role_id']\r\n\t\tFIRST_NAME = details.get('FIRST_NAME')\r\n\t\tLAST_NAME = details.get('LAST_NAME')\r\n\t\tMAIL_ID = details.get('MAIL_ID')\r\n\t\tAddress = details.get('Address')\r\n\t\tPHONE_NUMBER = details['PHONE_NUMBER']\r\n\t\t\r\n\t\tif FIRST_NAME == '':\r\n\t\t\tFIRST_NAME = 'User'\r\n\t\telse:\r\n\t\t\tFIRST_NAME = FIRST_NAME\r\n\r\n\t\tdef get_random_digits(stringLength=6):\r\n\t\t Digits = string.digits\r\n\t\t return ''.join((random.choice(Digits) for i in range(stringLength)))\r\n\t\t\r\n\t\totp = get_random_digits()\r\n\t\t\t\r\n\t\totp_query = (\"\"\"INSERT INTO `organisation_user_otp`(`USER_ID`,\r\n\t\t\t`organisation_id`,`OTP`,`role_id`,`FIRST_NAME`,`LAST_NAME`,\r\n\t\t\t`GENERATED_BY`, `MAIL_ID`, `Address`, `PHONE_NUMBER`) \r\n\t\t\tVALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\")\r\n\t\totpdata = cursor.execute(otp_query,(USER_ID,organisation_id,otp,role_id,\r\n\t\t\tFIRST_NAME,LAST_NAME,'System',MAIL_ID,Address,PHONE_NUMBER))\r\n\r\n\t\tif otpdata:\r\n\t\t\tdetails['OTP'] = otp \r\n\t\t\t#----------------------------sms-----------------------#\r\n\t\t\t'''url = \"http://cloud.smsindiahub.in/vendorsms/pushsms.aspx?\"\r\n\t\t\tuser = 'creamsonintelli'\r\n\t\t\tpassword = 
'denver@1234'\r\n\t\t\tmsisdn = PHONE_NUMBER\r\n\t\t\tsid = 'CRMLTD'\r\n\t\t\tmsg = 'Hi '+FIRST_NAME+' The OTP for the Online Transaction is '+otp+'. This OTP is valid only for one time use.'\r\n\t\t\tfl = '0'\r\n\t\t\tgwid = '2'\r\n\t\t\tpayload =\"user={}&password={}&msisdn={}&sid={}&msg={}&fl={}&gwid={}\".format(user,password,\r\n\t\t\t\tmsisdn,sid,msg,fl,gwid)\r\n\t\t\tpostUrl = url+payload\r\n\t\t\t# print(msg)\r\n\t\t\tresponse = requests.request(\"POST\", postUrl)\r\n\r\n\t\t\tif response.text == 'Failed#Invalid LoginThread was being aborted.':\r\n\t\t\t\tsent = 'N'\r\n\t\t\telse:\t\r\n\t\t\t\tsms_response = json.loads(response.text)['ErrorMessage']\r\n\t\t\t\t# print(sms_response)\r\n\t\t\t\tres = {\"status\":sms_response}\r\n\t\t\t\tif res['status'] == 'Success':\r\n\t\t\t\t\tsent = 'Y'\r\n\r\n\t\t\t\t\tsms_query = (\"\"\"INSERT INTO `otp_sms`(`title`,`body`,\r\n\t\t\t\t\t\t`phone_number`,`Sent`,`organisation_id`) \r\n\t\t\t\t\t\tVALUES(%s,%s,%s,%s,%s)\"\"\")\r\n\t\t\t\t\tsmsdata = cursor.execute(sms_query,('OTP',msg,PHONE_NUMBER,\r\n\t\t\t\t\t\t'Yes',organisation_id))\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tsent = 'N'\t\t\t'''\r\n\r\n\t\t\tif PHONE_NUMBER == '9933077180':\r\n\t\t\t\tprint('hello')\r\n\t\t\telse:\r\n\t\t\t\turl = \"https://enterprise.smsgupshup.com/GatewayAPI/rest?method=SendMessage\"\r\n\t\t\t\tuserid = 2000207272\r\n\t\t\t\tpassword = '5thrMk8f4'\r\n\t\t\t\tmsg = \"This OTP is to Validate your mobile phone number is \"+otp+\". Please do not share your OTP with anyone else. -AM Mobile Telecom Pvt Ltd\"\r\n\t\t\t\tpayload =\"&send_to={}&msg={}&msg_type=TEXT&userid={}&auth_scheme=plain&password={}&v=1.1&format=text\".format(PHONE_NUMBER,msg,\r\n\t\t\t\t\tuserid,password)\r\n\t\t\t\tpostUrl = url+payload\r\n\t\t\t\tprint(postUrl)\r\n\t\t\t\t# print(msg)\r\n\t\t\t\tresponse = requests.request(\"GET\", postUrl)\r\n\r\n\r\n\t\t\t\tresponse_text = response.text\r\n\r\n\t\t\t\tif \"error\" in response_text:\r\n\t\t\t\t\tsent = 'N'\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tsent = 'Y'\r\n\r\n\t\t\t\t\tsms_query = (\"\"\"INSERT INTO `otp_sms`(`title`,`body`,\r\n\t\t\t\t\t\t\t`phone_number`,`Sent`,`organisation_id`) \r\n\t\t\t\t\t\t\tVALUES(%s,%s,%s,%s,%s)\"\"\")\r\n\t\t\t\t\tsmsdata = cursor.execute(sms_query,('OTP',msg,PHONE_NUMBER,\r\n\t\t\t\t\t\t\t'Yes',organisation_id))\t\t\r\n\r\n\t\t\t\r\n\t\t\t#----------------------------sms-----------------------#\r\n\r\n\t\t\t#----------------------------mail----------------------#\r\n\t\t\tcursor.execute(\"\"\"SELECT `email` FROM `organisation_master` \r\n\t\t\t\tWHERE `phoneno`=%s\"\"\",(PHONE_NUMBER))\r\n\t\t\ttoMail = cursor.fetchone()\r\n\r\n\t\t\tif toMail == None:\r\n\t\t\t\tcursor.execute(\"\"\"SELECT `email` FROM `admins` WHERE \r\n\t\t\t\t\t`phoneno`=%s\"\"\",(PHONE_NUMBER))\r\n\t\t\t\ttoMailcus = cursor.fetchone()\r\n\t\t\t\tif toMailcus:\r\n\t\t\t\t\tuser_info = toMailcus['email']\r\n\t\t\t\telse:\r\n\t\t\t\t\tuser_info = ''\r\n\t\t\telse:\r\n\t\t\t\tuser_info = toMail['email']\r\n\t\t\t\r\n\t\t\tif user_info:\r\n\t\t\t\tmsg = MIMEMultipart()\r\n\t\t\t\tmsg['Subject'] = 'Verification code'\r\n\t\t\t\tmsg['From'] = EMAIL_ADDRESS\r\n\t\t\t\tmsg['To'] = user_info\r\n\t\t\t\thtml = \"\"\"<html>\r\n\t <head>\r\n\t <title>E-Commerce.com\r\n\t \r\n\t \r\n\t
</title>\r\n\t    </head>\r\n\t    <body>\r\n\t        Dear User,<br><br>\r\n\t        Your OTP is %s. This OTP is valid only for one time use.<br><br>\r\n\t        Thank you for choosing E-Commerce<br><br>\r\n\t        E-Commerce Support Team<br>\r\n\t        E-mail: support@ecommerce.com<br>\r\n\t        Website: www.ecommerce.com\r\n\t    </body>\r\n\t    </html>

    \r\n\t \"\"\"\r\n\t\t\t\tmessage = html % (otp)\r\n\t\t\t\t# print(message)\r\n\t\t\t\tpart1 = MIMEText(message, 'html')\r\n\t\t\t\tmsg.attach(part1)\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsmtp = smtplib.SMTP('mail.creamsonservices.com', 587)\r\n\t\t\t\t\tsmtp.starttls()\r\n\t\t\t\t\tsmtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\r\n\t\t\t\t\tsmtp.sendmail(EMAIL_ADDRESS, user_info, msg.as_string())\r\n\t\t\t\t\t\r\n\t\t\t\t\tres = {\"status\":'Success'}\r\n\t\t\t\t\tsent = 'Y'\r\n\t\t\t\t\tprint(sent)\r\n\t\t\t\t\t\r\n\t\t\t\texcept Exception as e:\r\n\t\t\t\t\tres = {\"status\":'Failure'}\r\n\t\t\t\t\tsent = 'N'\r\n\t\t\t\t\tprint(sent)\r\n\t\t\t\t\t# raise e\r\n\t\t\t\tsmtp.quit()\r\n\t\t\t\r\n\r\n\t\t#----------------------------mail----------------------#\r\n\r\n\t\telse:\r\n\t\t\tdetails = []\r\n\r\n\t\tconnection.commit()\r\n\t\tcursor.close()\r\n\t\treturn ({\"attributes\": {\"status_desc\": \"ECommerce OTP\",\r\n\t \"status\": \"success\"\r\n\t },\r\n\t\t\t\t\"responseList\":details}), status.HTTP_200_OK\r\n\r\n\r\n","repo_name":"sutandra-coder/ecommerce","sub_path":"ecommerce_organisation_otp.py","file_name":"ecommerce_organisation_otp.py","file_ext":"py","file_size_in_byte":9049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20198370296","text":"from utils import gen_random_str\nfrom models import db, DxccPrefixes, DxccExceptions, DxccEntities\n\n\ndef test_debug_testing(app):\n \"\"\"\n For some reasons, the tests will be EXTREMELY SLOW if DEBUG=True\n I have no effing idea why.\n See conftest.py:db() for more.\n \"\"\"\n assert not app.config[\"DEBUG\"]\n assert app.config[\"TESTING\"]\n\n\ndef test_gen_random_str():\n times = 0\n while True:\n a = gen_random_str(20)\n b = gen_random_str(20)\n assert a != b\n times += 1\n if times >= 100:\n break\n\n\ndef test_import_xml_offline(client, session, app):\n # This test rely on import_cty() called in db() of conftest\n _dp = db.session.query(DxccPrefixes.id).count()\n _dx = db.session.query(DxccExceptions.id).count()\n _de = db.session.query(DxccEntities.id).count()\n # Count as of 25/08/16 was:\n # 3665 16435 401\n # check are lowered to avoid changing them maybe too frequently\n assert _dp >= 3500\n assert _dx >= 16300\n assert _de >= 300\n","repo_name":"rhaamo/ahrl","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36470241992","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\n\nclass Foward(): \n def __init__(self):\n #self.decision = \"\"\n self.cmd_pub = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=1)\n self.sub = rospy.Subscriber('turning', String, self.decideForward)\n self.r = rospy.Rate(250) # 250hz\n self.linear_speed = 0.5\n self.move_cmd = Twist()\n self.start()\n\n\n def decideForward(self, data):\n self.decision=data.data\n if self.decision==\"T\":\n print(self.decision)\n self.stopMoving()\n else:\n print(self.decision)\n self.forward()\n \n def stopMoving(self):\n self.move_cmd.linear.x=0\n self.move_cmd.linear.z=0\n def forward(self):\n #print(\"MOVING FORWARD\\n\")\n self.move_cmd.linear.x = self.linear_speed\n #print(self.move_cmd)\n #self.cmd_pub.publish(self.move_cmd)\n \n def start(self):\n while not rospy.is_shutdown():\n self.cmd_pub.publish(self.move_cmd)\n self.r.sleep()\n \n \ndef main():\n 
def main():\n    rospy.init_node('Forward')\n    try:\n        Foward()\n    except rospy.ROSInterruptException:\n        pass\n\nif __name__ == '__main__':\n    main()","repo_name":"Samb55/ROS-explore-environment-","sub_path":"src/forward.py","file_name":"forward.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"41022047566","text":"# Add question content, accepted count, submission count, and build similar-question relation data\nimport emoji\nimport pymysql\nimport http.cookiejar as cookielib\nimport urllib.request\nimport requests\nimport json\n\n# import datetime\n\n# Initialize the list of fetchable questions\nfrom docutils.writers import null\n\nquestion_list = []\n\n# Open the database connection\ndb = pymysql.connect(\"localhost\", \"root\", \"11\", \"leetcodespyder\")\n\n# Get an operation cursor via cursor()\ncursor = db.cursor()\n# question_sql: query that fetches the list of non-premium questions\nquestion_sql = \"SELECT QUESTION_ID, TITLESLUG ,TEANSLATEDCONTENT FROM QUESTION_copy \"\n# question_sql = \"SELECT QUESTION_ID, TITLESLUG,translatedtitle FROM QUESTION \"\n# question_sql = \"SELECT QUESTION_ID, TITLESLUG FROM enterprise WHERE SOLUTION_NUM IS NULL \"\ntry:\n    # Execute the question_sql statement\n    cursor.execute(question_sql)\n    # Fetch all records\n    results = cursor.fetchall()\n    for row in results:\n        if row[2]=='None' or row[2]=='' or row[2]==None:\n            # Info list for one question\n            question = []\n            question.append(row[0])\n            question.append(row[1])\n            # Question list\n            question_list.append(question)\n    print(\"Question list fetched successfully!\")\nexcept:\n    print(\"Request failed!\")\n\n# print(question_list)\n\n# Request headers\nurl = \"https://leetcode-cn.com/graphql\"\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\npayloadData = {\"operationName\": \"questionData\", \"variables\": {\"titleSlug\": \"two-sum\"},\n               \"query\": \"query questionData($titleSlug: String!) {\\n  question(titleSlug: $titleSlug) {\\n    questionId\\n    questionFrontendId\\n    boundTopicId\\n    title\\n    titleSlug\\n    content\\n    translatedTitle\\n    translatedContent\\n    isPaidOnly\\n    difficulty\\n    likes\\n    dislikes\\n    isLiked\\n    similarQuestions\\n    contributors {\\n      username\\n      profileUrl\\n      avatarUrl\\n      __typename\\n    }\\n    langToValidPlayground\\n    topicTags {\\n      name\\n      slug\\n      translatedName\\n      __typename\\n    }\\n    companyTagStats\\n    codeSnippets {\\n      lang\\n      langSlug\\n      code\\n      __typename\\n    }\\n    stats\\n    hints\\n    solution {\\n      id\\n      canSeeDetail\\n      __typename\\n    }\\n    status\\n    sampleTestCase\\n    metaData\\n    judgerAvailable\\n    judgeType\\n    mysqlSchemas\\n    enableRunCode\\n    enableTestMode\\n    envInfo\\n    __typename\\n  }\\n}\\n\"}\npayloadData_ = {\"operationName\": \"solutionCount\", \"variables\": {\"questionSlug\": \"intersection-of-three-sorted-arrays\"},\n                \"query\": \"query solutionCount($questionSlug: String!) {\\n  solutionNum(questionSlug: $questionSlug)\\n}\\n\"}\ncookies = {\n    'cookie': '_uab_collina=159184635745193904444315; csrftoken=MbBXVJUmcjYOLderbCL5D4VaWaTB5gSgt3DTgGc6TA5BkWNbvurUJKOM5ljTBnEm; LEETCODE_SESSION=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJfYXV0aF91c2VyX2lkIjoiMzk4MjE4IiwiX2F1dGhfdXNlcl9iYWNrZW5kIjoiYXV0aGVudGljYXRpb24uYXV0aF9iYWNrZW5kcy5QaG9uZUF1dGhlbnRpY2F0aW9uQmFja2VuZCIsIl9hdXRoX3VzZXJfaGFzaCI6ImY3NDM0ODlmMDhmOTFhNjM3MjI0MmNlOWMxMWQ5YjJjMGUxYmQxOWY0ZDAyOTc4ZmI3ODc3NjJlYThjMzIwZTIiLCJpZCI6Mzk4MjE4LCJlbWFpbCI6Im9vb2h1b0BnbWFpbC5jb20iLCJ1c2VybmFtZSI6Im9vaHVvIiwidXNlcl9zbHVnIjoib29odW8iLCJhdmF0YXIiOiJodHRwczovL2Fzc2V0cy5sZWV0Y29kZS1jbi5jb20vYWxpeXVuLWxjLXVwbG9hZC91c2Vycy9haWVyYnVkZS9hdmF0YXJfMTU1MTM1MTcwNS5wbmciLCJwaG9uZV92ZXJpZmllZCI6dHJ1ZSwiX3RpbWVzdGFtcCI6MTYwMDc2NzI5MC44MTY5NDN9.LPi0UyODHCmk7nNiuBtSQy4_OmV2O0oEFxab6FofJfA'}\n
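# --- editor's note: hedged refactoring sketch, not part of the original script ---\n# Every iteration of the loop below opens a fresh HTTP connection; a requests.Session\n# would reuse one connection and carry the headers along, e.g.:\n#   session = requests.Session()\n#   session.headers.update(headers)\n#   question_data = session.post(url, json=payloadData, cookies=cookies)\n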
# Loop over the questions, requesting detailed info for each\nfor question in question_list:\n    # print(\"Requesting question:\", question[1], \"question id:\", question[0])\n\n    # Set the title slug for this request\n    payloadData[\"variables\"][\"titleSlug\"] = question[1]\n    payloadData_[\"variables\"][\"questionSlug\"] = question[1]\n    # Request the data\n    question_data = requests.post(url, json=payloadData, headers=headers, cookies=cookies)\n    questions_json = json.loads(question_data.text)\n\n    question_data_ = requests.post(url, json=payloadData_, headers=headers, cookies=cookies)\n    questions_json_ = json.loads(question_data_.text)\n    # print(questions_json)\n\n    # Print question info (debug)\n    # print(questions_json[\"data\"][\"question\"][\"translatedTitle\"])\n    # print(questions_json[\"data\"][\"question\"][\"content\"])\n    # print(questions_json[\"data\"][\"question\"][\"translatedContent\"])\n    # print(json.loads(questions_json[\"data\"][\"question\"][\"stats\"])[\"totalAcceptedRaw\"])\n    # print(json.loads(questions_json[\"data\"][\"question\"][\"stats\"])[\"totalSubmissionRaw\"])\n\n    # Build the UPDATE statement\n    question_sql = \"UPDATE QUESTION_copy SET \"\n    # question_sql = \"UPDATE enterprise SET \"\n    # if the content contains quotes like \"xx' xx\", this string building may break.\n    question_sql = question_sql + \"CONTENT=\" + \"\\'\" + str(questions_json[\"data\"][\"question\"][\"content\"]).replace('\\'',\n                                                                                                                 '‘') + '\\', '\n    question_sql = question_sql + \"TRANSLATEDTITLE=\" + \"\\\"\" + str(\n        questions_json[\"data\"][\"question\"][\"translatedTitle\"]) + '\\\", '\n\n    question_sql = question_sql + \"TEANSLATEDCONTENT=\" + \"\\'\" + emoji.demojize(str(\n        questions_json[\"data\"][\"question\"][\"translatedContent\"]).replace(\"'\", \"’\")) + '\\', '\n\n    question_sql = question_sql + \"ACCEPT=\" + str(\n        json.loads(questions_json[\"data\"][\"question\"][\"stats\"])[\"totalAcceptedRaw\"]) + ', '\n\n    question_sql = question_sql + \"SOLUTION_NUM=\" + str(json.loads(str(questions_json_[\"data\"][\"solutionNum\"]))) + ', '\n\n    question_sql = question_sql + \"SUBMISSION=\" + str(\n        json.loads(questions_json[\"data\"][\"question\"][\"stats\"])[\"totalSubmissionRaw\"]) + ' '\n\n    question_sql = question_sql + \"WHERE QUESTION_ID=\" + str(question[0])\n    # print (\"Update SQL statement:\", question_sql)\n    # selectSQL = \"select TRANSLATEDTITLE from question where QUESTION_ID=\"\n    # selectSQL = selectSQL + str(question[0]) + '; '\n
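    # --- editor's note: hedged alternative, not part of the original script; the variable\n    # names below are illustrative. Building SQL by concatenation forces the quote-swapping\n    # hacks above; a parameterized UPDATE over the same columns avoids them entirely:\n    #   update_sql = ('UPDATE QUESTION_copy SET CONTENT=%s, TRANSLATEDTITLE=%s, '\n    #                 'TEANSLATEDCONTENT=%s, ACCEPT=%s, SOLUTION_NUM=%s, SUBMISSION=%s '\n    #                 'WHERE QUESTION_ID=%s')\n    #   cursor.execute(update_sql, (content, title, translated, accepted, solution_num, submission, question[0]))\n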
print(\"question更新\", str(question[0]), \"\\033[1;31;40mERROR!\\033[0m\")\n\n # # 抽取相似问题列表\n # similar_questions = json.loads(questions_json[\"data\"][\"question\"][\"similarQuestions\"])\n # for i in similar_questions:\n # # 根据相似问题请求标题获取该问题ID\n # select_sql = \"select QUESTION_ID from question where TITLESLUG = \" + \"\\\"\" + i[\"titleSlug\"] + \"\\\"\"\n # # print(\"select_sql:\",select_sql)\n # try:\n # # 执行question_sql语句\n # cursor.execute(select_sql)\n # # 获取所有记录列表\n # results = cursor.fetchall()\n # # print(\"\\033[1;32;40mselect_SUCCESS!\\033[0m\")\n # for row in results:\n # similar_question = []\n # similar_question.append(row[0])\n # # print(row[0])\n # for j in similar_question:\n # similar_sql = \"INSERT INTO SIMILAR(QUE_ID,SIM_ID) VALUES (\"\n # similar_sql = similar_sql + str(question[0]) + \", \"\n # similar_sql = similar_sql + str(j) + \")\"\n # # print (\"相似题目插入SQL语句:\", similar_sql)\n # try:\n # # 执行sql语句\n # cursor.execute(similar_sql)\n # # 提交到数据库执行\n # db.commit()\n # # print(\"question插入\", \"\\033[1;32;40mSUCCESS!!\\033[0m\")\n # except:\n # # 如果发生错误则回滚\n # db.rollback()\n # print(\"similar插入\", \"\\033[1;31;40mERROR!!\\033[0m\")\n # except:\n # # 如果发生错误则回滚\n # db.rollback()\n # print(\"\\033[1;31;40mselect_ERROR!\\033[0m\")\n # print(\"QUESTION_ID:\", question[0], \"\\033[1;32;40m请求完毕!!!!!\\033[0m\")\n\n# 关闭数据库连接\ndb.close()\n\nprint(\"爬取结束!\")\n","repo_name":"Oohuo/SpiderOfLC","sub_path":"2-题相关数据.py","file_name":"2-题相关数据.py","file_ext":"py","file_size_in_byte":8596,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17811670488","text":"\"\"\"Helper functions for the model\n\"\"\"\nimport math\nimport random\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\n\nclass AverageMeter(object):\n \"\"\"Stores loss and intersectin/union pixel values\"\"\"\n def __init__(self):\n self.val = None\n self.sum = None\n self.count = None\n self.avg = None\n self.ema = None # ema = exponential moving averages\n self.initialized = False\n\n def update(self, val, n=1):\n if not self.initialized:\n self.initialize(val, n)\n else:\n self.add(val, n)\n\n def initialize(self, val, n):\n self.val = val\n self.sum = val * n \n self.count = n\n self.avg = val\n self.ema = val\n self.initialized = True\n\n def add(self, val, n):\n \"\"\"\n params:\n val: loss value of each mini-batch\n n: batch size\n \"\"\"\n self.val = val \n self.sum += val * n # estimates loss per sample\n self.count += n \n self.avg = self.sum / self.count # average loss per epoch\n self.ema = self.ema * 0.99 + self.val * 0.01\n\n\ndef inter_and_union(pred, mask, num_class):\n # Explanation of this at: https://github.com/bradford415/deeplabv3-pytorch/blob/main/utils.py\n pred = np.asarray(pred, dtype=np.uint8).copy()\n mask = np.asarray(mask, dtype=np.uint8).copy()\n\n # 255 -> 0\n pred += 1\n mask += 1\n pred = pred * (mask > 0)\n\n inter = pred * (pred == mask)\n (area_inter, _) = np.histogram(inter, bins=num_class, range=(1, num_class))\n (area_pred, _) = np.histogram(pred, bins=num_class, range=(1, num_class))\n (area_mask, _) = np.histogram(mask, bins=num_class, range=(1, num_class))\n area_union = area_pred + area_mask - area_inter\n\n return (area_inter, area_union)\n\n\ndef preprocess(image, mask, flip=False, scale=None, crop=None):\n \"\"\"Preprocess images as defined in the deeplabv3 paper. 
def preprocess(image, mask, flip=False, scale=None, crop=None):\n    \"\"\"Preprocess images as defined in the deeplabv3 paper. This includes\n    random resizing from 0.5-2.0, horizontal flipping, random cropping, and \n    normalizing the values based on the mean and standard deviation of the \n    pretrained network dataset (ImageNet)\n\n    MAKE SURE YOU PERFORM THE SAME TRANSFORM, WITH THE SAME TRANSFORM VALUES, \n    FOR THE IMAGE AND LABEL. It is shown here how to do this:\n    https://discuss.pytorch.org/t/torchvision-transfors-how-to-perform-identical-transform-on-both-image-and-target/10606/7\n    \n    The mean and standard deviation from the ImageNet dataset are used because we pretrain\n    deeplab on ImageNet.\n\n    Training applies crop, flip, resize, and normalization\n    \"\"\"\n    if flip:\n        if random.random() < 0.5:\n            image = image.transpose(Image.FLIP_LEFT_RIGHT)\n            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n\n    if scale:\n        w, h = image.size\n        rand_log_scale = math.log(scale[0], 2) + random.random() * (\n            math.log(scale[1], 2) - math.log(scale[0], 2))\n        random_scale = math.pow(2, rand_log_scale)\n        new_size = (int(round(w * random_scale)), int(round(h * random_scale)))\n        image = image.resize(new_size, Image.LANCZOS) # fix: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter\n        mask = mask.resize(new_size, Image.NEAREST)\n\n    data_transforms = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    ])\n    image = data_transforms(image)\n    mask = torch.LongTensor(np.array(mask).astype(np.int64))\n\n    if crop:\n        h, w = image.shape[1], image.shape[2]\n        pad_tb = max(0, crop[0] - h)\n        pad_lr = max(0, crop[1] - w)\n        image = torch.nn.ZeroPad2d((0, pad_lr, 0, pad_tb))(image)\n        mask = torch.nn.ConstantPad2d((0, pad_lr, 0, pad_tb), 255)(mask)\n\n        h, w = image.shape[1], image.shape[2]\n        i = random.randint(0, h - crop[0])\n        j = random.randint(0, w - crop[1])\n        image = image[:, i:i + crop[0], j:j + crop[1]]\n        mask = mask[i:i + crop[0], j:j + crop[1]]\n\n    return image, mask\n\n\ndef colorize(prediction, save_name, cmap='cityscapes'):\n    prediction = prediction.numpy().squeeze().astype(np.uint8)\n    pred_pil = Image.fromarray(prediction)\n    pred_pil.putpalette(cmap)\n    pred_pil.save(save_name)\n    \n","repo_name":"bradford415/low-power-segmentation","sub_path":"utils/.ipynb_checkpoints/utils-checkpoint.py","file_name":"utils-checkpoint.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"11197250923","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom airtable import Airtable\nimport os\n\n# connect to airtable API\nAT = Airtable(os.environ.get('AIRTABLE_MOVIESTABLE_BASE_ID'),\n              'Movies',\n              api_key=os.environ.get('AIRTABLE_API_KEY'))\n\n# Create your views here.\n# home_page is a function, takes in an argument 'request'\n# def myview(request):\ndef home_page(request):\n    # request.GET.get('query', '') returns the search word 'five things'\n#    print(str(request.GET.get('query', '')))\n    user_query = str(request.GET.get('query', ''))\n    # use the airtable python wrapper get_all and formula\n    # FIND(1st arg user query, 2nd arg API search) {name} is name field in the dictionary?\n    search_result = AT.get_all(sort=[('Ranking', 'asc')], formula=\"FIND('\" + user_query.lower() + \"', LOWER({Name}))\")\n    # send result to front end, generally use a context dictionary, key-value pair\n    # name the key as 'search_result'\n    # context variable: store the search result in the value of the dictionary, name the key search_result\n    stuff_for_frontend = {'search_result': search_result}\n    # when return, 
render the 'request' in this html file. the context dictionary is: stuff_for_frontend\n return render(request, 'movies/movies_stuff.html', stuff_for_frontend)\n\ndef create(request):\n print('haha')\n if request.method == 'POST':\n data = {\n # from the modal's input\n 'Name': request.POST.get('name'),\n #Pictures is a list\n 'Pictures': [{'url': request.POST.get('url') or 'https://upload.wikimedia.org/wikipedia/commons/a/ac/No_image_available.svg'}],\n 'Rating': int(request.POST.get('rating')),\n 'Notes': request.POST.get('notes'),\n 'Ranking': int(request.POST.get('rank'))\n }\n # when the insert function is actinoed, there will be an respnse to show us whatit's done\n # we can store the response in a variable\n try:\n response = AT.insert(data)\n # notify on create\n messages.success(request, 'New movie added: {}'.format(response['fields'].get('Name')))\n except Exception as e:\n messages.warning(request, 'Got an error when trying to update a mvoie: {}'.format(e))\n #once this function runs, take me back to the root directory/ homepage/ yourapp.com\n return redirect('/')\n\ndef edit(request, movie_id):\n if request.method == 'POST':\n data = {\n 'Name': request.POST.get('name'),\n 'Pictures': [{'url': request.POST.get('url') or 'https://www.freeiconspng.com/uploads/no-image-icon-23.jpg'}],\n 'Rating': int(request.POST.get('rating')),\n 'Notes': request.POST.get('notes'),\n 'Ranking': int(request.POST.get('rank'))\n }\n try:\n response = AT.update(movie_id, data)\n # notify on update\n messages.success(request, 'Updated movie: {}'.format(response['fields'].get('Name')))\n except Exception as e:\n messages.warning(request, 'Got an error when trying to update a movie: {}'.format(e))\n return redirect('/')\n\ndef delete(request, movie_id):\n try:\n # print(movie_id)\n #retrieve the name first for the message tag, before deleting the item\n movie_name =AT.get(movie_id)['fields'].get('Name')\n # note before AT.delete, request doesn't have anything =>we used AT.get(movie_id) instead of reponse['fields']\n response = AT.delete(movie_id)\n # notify on delete, here we use the movie_name variable which already have the movie name.\n messages.warning(request, 'Deleted movie: {}'.format(movie_name))\n except Exception as e:\n messages.warning(request, 'Got an error when trying to delete a movie: {}'.format(e))\n return redirect('/')\n","repo_name":"DeveloperMarcusW/django-my-fav-movie-web-app","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5968630583","text":"from asyncio import sleep as asleep\nfrom requests import get as rget\nfrom config import LOGGER, USERS_API\nfrom bot.client import Client\nfrom pyrogram import filters, enums\nfrom pyrogram.types import Message, ForceReply\n\n@Client.on_message(filters.command(\"login\") & filters.private)\nasync def login_handler(c: Client, m: Message):\n ''' Login Into Bot to Use Bot Features\n :param token: Your Own API token of UploadEver.in\n '''\n\n if m.chat.id in USERS_API.keys():\n await m.reply_text(text=\"😑 You have Already Login, If you want to Logout, Use /logout\", parse_mode=enums.ParseMode.HTML, quote=True)\n return\n auth_msg = await m.reply_text(text=\"🖨 Bot Authorization: \\n\\nYou can Get/Generate/Copy API Token from https://uploadever.in/?op=my_account\", parse_mode=enums.ParseMode.HTML, reply_markup=ForceReply(True, \"Enter UploadEver.in API Key\"))\n input_msg: Message = await 
c.listen(m.chat.id)\n    Token = input_msg.text\n    if Token is None:\n        await auth_msg.delete()\n        await m.reply_text("👤 Login Process Cancelled!!")\n        return await input_msg.continue_propagation()\n    elif Token and Token.startswith("/"):\n        await auth_msg.delete()\n        await m.reply_text("👤 Login Process Cancelled!")\n        return await input_msg.continue_propagation()\n    else:\n        await input_msg.delete()\n        await auth_msg.delete()\n        await asleep(2)\n        resp = rget(f"https://uploadever.in/api/account/info?key={Token}")\n        jdata = resp.json()\n        if jdata["status"] == 200:\n            USERS_API[m.chat.id] = Token\n            LOGGER.info(f"[UploadEver.in] User: {m.chat.id} Log In Success")\n            txt = f"{jdata['result']['email']} Successfully Logged In ✅️!!"\n        else:\n            LOGGER.info(f"[UploadEver.in] User: {m.chat.id} Log In Unsuccessful")\n            txt = jdata['msg']\n        await m.reply_text(text=txt, parse_mode=enums.ParseMode.HTML, quote=True)\n\n\n@Client.on_message(filters.command("logout") & filters.private)\nasync def logout_handler(c: Client, m: Message):\n    ''' Logout from Bot to Disable Bot Features\n    :param token: Your Own API token of UploadEver.in\n    '''\n\n    try:\n        USERS_API.pop(m.chat.id)\n        text_ = "🥲 You Successfully Logout. Do /login to Come Again"\n    except KeyError:\n        text_ = "😬 I see, you have not Login, Do /login to Use this Command. "\n    await m.reply_text(text=text_, parse_mode=enums.ParseMode.HTML, quote=True)\n","repo_name":"bbrooks870/UploadEver-TBot","sub_path":"bot/plugins/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"3570137818","text":"\"\"\"Module for DB creation for the M5-Project.\"\"\"\n\nimport logging\nimport os\nimport sqlite3\nfrom pandas.io.parsers import TextFileReader\nfrom zipfile import ZipFile\n\nimport pandas as pd\nfrom kaggle import api\nfrom tqdm import tqdm\n\nCOMP_NAME = 'm5-forecasting-accuracy'\n\n\nclass DBGenerator:\n    \"\"\"Class to generate the DB for the M5-Project.\"\"\"\n\n    def __init__(self, generator_dict, work_dir):\n        \"\"\"Initialize DBGenerator.\"\"\"\n        self.generator = generator_dict\n\n        self.logger = self._get_logger()\n\n        self.logger.info('Initialized')\n\n        self._validate_generator()\n\n        if 'm5.db' in os.listdir(work_dir):\n            os.remove(os.path.join(work_dir, 'm5.db'))  # fix: the bare 'm5.db' pointed at the CWD, not work_dir\n\n        self.connection = sqlite3.connect(work_dir + '/m5.db')\n\n    def create_db(self):\n        \"\"\"Create Database.\"\"\"\n        self._create_sales_info_table()\n        self._create_other_table('calendar')\n        self._create_other_table('price')\n\n    def _create_sales_info_table(self):\n        self.logger.info('Creating info and sales table...')\n\n        for chunk in tqdm(self.generator['sales']):\n            sales_table, info_table = self._split_and_melt(chunk)\n\n            info_table.to_sql(\n                'info', if_exists='append', index=False, con=self.connection\n            )\n\n            sales_table.to_sql(\n                'sales', if_exists='append', index=False, con=self.connection\n            )\n\n        self.logger.info('Sales and info tables created.')\n\n    def _create_other_table(self, table_name):\n        self.logger.info(f'Creating {table_name} table...')\n        for chunk in self.generator[table_name]:\n            chunk.to_sql(\n                table_name,\n                con=self.connection,\n                index=False,\n                if_exists='append'\n            )\n\n        self.logger.info(f'{table_name} table created...')\n\n    def _validate_generator(self):\n        self.logger.info('Validating generator dict...')\n        expected_keys = {'sales', 'calendar', 'price'}\n\n        assert expected_keys == set(self.generator)\n\n        for key in self.generator:\n            assert isinstance(self.generator[key], 
TextFileReader)\n\n self.logger.info('Validation Successful!')\n\n @staticmethod\n def _split_and_melt(chunk):\n info_cols = {\n 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'\n }\n\n sales_cols = set(chunk) - info_cols\n\n sales = chunk.loc[:, sales_cols].copy()\n info = chunk.loc[:, {'id'}.union(info_cols)].copy()\n\n sales = sales.melt(\n id_vars=['id'],\n var_name='day',\n value_name='sales'\n )\n\n return sales, info\n\n @staticmethod\n def _get_logger():\n logging.basicConfig(\n filename='db_generator.log',\n format=\"%(levelname)s %(asctime)s - %(message)s\",\n level=20,\n filemode='w'\n )\n\n return logging.getLogger()\n\n @classmethod\n def from_api(cls, work_dir, persist=False):\n \"\"\"Construct DB Generator with Kaggle API.\"\"\"\n api.competition_download_cli(\n COMP_NAME,\n path=work_dir\n )\n zip_path = work_dir + '/' + COMP_NAME + '.zip'\n zipfile = ZipFile(zip_path)\n\n if not persist:\n os.remove(zip_path)\n\n generator_dict = {\n 'sales': pd.read_csv(\n zipfile.open('sales_train_validation.csv'), chunksize=10000\n ),\n 'calendar': pd.read_csv(\n zipfile.open('calendar.csv'), chunksize=10000\n ),\n 'price': pd.read_csv(\n zipfile.open('sell_prices.csv'), chunksize=10000\n )\n }\n\n db_gen = cls(generator_dict, work_dir)\n\n return db_gen\n\n @classmethod\n def from_zipfile(cls, work_dir):\n \"\"\"Construct DB generator from Zipfile.\"\"\"\n zip_path = work_dir + '/' + COMP_NAME + '.zip'\n zipfile = ZipFile(zip_path)\n\n generator_dict = {\n 'sales': pd.read_csv(\n zipfile.open('sales_train_validation.csv'), chunksize=10000\n ),\n 'calendar': pd.read_csv(\n zipfile.open('calendar.csv'), chunksize=10000\n ),\n 'price': pd.read_csv(\n zipfile.open('sell_prices.csv'), chunksize=10000\n )\n }\n\n db_gen = cls(generator_dict, work_dir)\n\n return db_gen\n\n @classmethod\n def from_directory(cls, work_dir):\n \"\"\"Generate DB Generator from directory with csv files.\"\"\"\n generator_dict = {\n 'sales': pd.read_csv(\n f'{work_dir}/sales_train_validation.csv', chunksize=10000\n ),\n 'calendar': pd.read_csv(\n f'{work_dir}/calendar.csv', chunksize=10000\n ),\n 'price': pd.read_csv(\n f'{work_dir}/sell_prices.csv', chunksize=10000\n )\n }\n\n db_gen = cls(generator_dict, work_dir)\n\n return db_gen\n","repo_name":"singhnavi25/M5-Forecasting-Accuracy","sub_path":"src/db_generator.py","file_name":"db_generator.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"23620297604","text":"from oauth2client import client\nfrom IEEETestbankApp.models.auth import Config\nfrom IEEETestbankApp.models.db import db\nimport httplib2\n\ndef fetch_latest_cred(val):\n credentials = client.OAuth2Credentials.from_json(val)\n if credentials.access_token_expired:\n print('[credhelper.py] Detected credential expiration, refreshing tokens.')\n credentials.refresh(httplib2.Http())\n store_cred(credentials)\n return credentials\n\ndef store_cred(credentials):\n config_gdrive_cred = Config.query.filter_by(name='gdrive_oauth2_credentials').first()\n if config_gdrive_cred != None:\n config_gdrive_cred.value = credentials.to_json()\n else:\n new_cred = Config(name = 'gdrive_oauth2_credentials',\n value = credentials.to_json(),\n description = \"IEEE@UMD Testbank <-> Google Drive Credentials\")\n db.session.add(new_cred)\n 
db.session.commit()\n","repo_name":"UMDIEEE/ieee-web","sub_path":"IEEETestbankApp/views/credhelper.py","file_name":"credhelper.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8220956904","text":"from sys import argv\nfrom download_91porn import download_91porn\nfrom download_91porny import download_91porny\nfrom download_iyf import download_iyf\n\nif __name__ == \"__main__\":\n if len(argv) < 2:\n url = input(\"URL: \")\n else:\n url = argv[1]\n \n if '91porny.com' in url:\n download_91porny(url)\n elif '91porn.com' in url:\n download_91porn(url)\n elif 'www.iyf.tv' in url:\n download_iyf(url)\n else:\n print('{} is not in supported sites.'.format(url))","repo_name":"unilink233/porn_download","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17892830388","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\n@author: TavoGLC\nFrom:\n\n-Parameter estimation of differential equation models-\n\n\"\"\"\n\n###############################################################################\n# Libraries to use \n###############################################################################\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as path_effects\n\nfrom scipy.integrate import odeint\nfrom scipy.optimize import curve_fit\n\nimport scipy.stats as stats\n\n###############################################################################\n# General plot functions \n###############################################################################\n\n#Elimates the left and top lines and ticks in a matplotlib plot\ndef PlotStyle(Axes,Title):\n \n Axes.spines['top'].set_visible(False)\n Axes.spines['right'].set_visible(False)\n Axes.spines['bottom'].set_visible(True)\n Axes.spines['left'].set_visible(True)\n Axes.xaxis.set_tick_params(labelsize=14)\n Axes.yaxis.set_tick_params(labelsize=14)\n Axes.set_title(Title)\n\n###############################################################################\n# General Model Construction \n###############################################################################\n\n#Performs the dot produt to make the model \ndef MakeModel(MatrixCoeficients,InitialConditions):\n \n return np.dot(MatrixCoeficients,InitialConditions)\n\n###############################################################################\n# ODE system solving \n###############################################################################\n\nSolverTime=np.linspace(0,20,num=150)\n\n#Parameters for Model A\nalpha=0.4\nbeta=1\n\n#Matrix of coeficients for model A\n#Model A is refered in this script as model 01\ndef MakeModelMatrix01(Alpha,Beta):\n \n Matrix=np.zeros((2,2))\n\n Matrix[0,0]=Alpha\n Matrix[0,1]=-Beta\n Matrix[1,0]=1\n \n return Matrix\n\n#Integrating Model A\nMatrix01=MakeModelMatrix01(alpha,beta)\nInt=np.array([1,1])\n\ndef SODE(InitialConditions,t):\n \n return MakeModel(Matrix01,InitialConditions)\n\nSolution=odeint(SODE,Int,SolverTime)\n\n###############################################################################\n# Visualisation\n###############################################################################\n\nDerivativeLabel=r'$\\dfrac{d}{dt} f(t) 
$'\nSolutionLabel=r'$f(t)$'\n\nplt.figure(1,figsize=(9,6))\n\nplt.plot(SolverTime,Solution[:,1],'b-',label=SolutionLabel,path_effects=[path_effects.SimpleLineShadow(alpha=0.2,rho=0.2),\n path_effects.Normal()])\nplt.plot(SolverTime,Solution[:,0],'g-',label=DerivativeLabel,path_effects=[path_effects.SimpleLineShadow(alpha=0.2,rho=0.2),\n path_effects.Normal()])\n\nplt.xlabel('Time',fontsize=16,fontweight='bold')\nplt.ylabel('Displacement',fontsize=16,fontweight='bold')\nplt.legend(loc=0,fontsize=14)\n\nax=plt.gca()\nPlotStyle(ax,'')\n\n###############################################################################\n# Data Generation\n###############################################################################\n\n#Element wise sum of two iterables of the same size, name makes reference to the output rather than the process\ndef MakeNoisyData(Data,Noise):\n \n return [val+cal for val,cal in zip(Data,Noise)]\n\nWhiteNoise=[np.random.uniform(low=-1,high=1)*3 for val in Solution[:,1]]\nWhiteSignal=MakeNoisyData(Solution[:,1],WhiteNoise)\n\n###############################################################################\n# ODE fitting \n###############################################################################\n\n#Function for parameter estimation\ndef ModelSolver01(t,Alpha,Beta,InitialConditions):\n \n cAlpha=Alpha\n cBeta=Beta\n cInit=InitialConditions\n \n cMatrix=MakeModelMatrix01(cAlpha,cBeta)\n \n def LocalModel(cInit,t):\n \n return MakeModel(cMatrix,cInit)\n \n Solution=odeint(LocalModel,cInit,t)\n \n return Solution[:,1]\n\ndef ModelSolution01(t,Alpha,Beta):\n \n return ModelSolver01(t,Alpha,Beta,Int)\n \nModel01Params=curve_fit(ModelSolution01,SolverTime,WhiteSignal)\n\n###############################################################################\n# Fit solution\n###############################################################################\n\nfAlpha=Model01Params[0][0]\nfBeta=Model01Params[0][1]\n\nFitSolutionA=ModelSolution01(SolverTime,fAlpha,fBeta)\n\n###############################################################################\n# Visualization \n###############################################################################\n\nplt.figure(2,figsize=(9,6))\n\n(markers, stemlines, baseline) = plt.stem(SolverTime, WhiteSignal,bottom=-42,label='Data',basefmt=\" \")\nplt.setp(stemlines, linestyle=\"-\", color=\"red\", linewidth=0.5,alpha=0.5 )\nplt.setp(markers, color=\"red\",alpha=0.75 )\n\nplt.plot(SolverTime,FitSolutionA,'b-',label=SolutionLabel,path_effects=[path_effects.SimpleLineShadow(alpha=0.2,rho=0.2),\n path_effects.Normal()])\n \nplt.xlabel('Time',fontsize=16,fontweight='bold')\nplt.ylabel('Displacement',fontsize=16,fontweight='bold')\nplt.legend(loc=0,fontsize=14)\n\nplt.ylim(-42,75)\n\nax=plt.gca()\nPlotStyle(ax,'')\n\n###############################################################################\n# Residuals Statistical test \n###############################################################################\n\nObRes=[signal-model for signal,model in zip(WhiteSignal,FitSolutionA)]\n\nKS=stats.ks_2samp(ObRes,WhiteNoise)\n\nprint(KS)\n\n###############################################################################\n# ODE system solving \n###############################################################################\n\nSolverTime=np.linspace(0,20,num=120)\n\n#Model B Parameters\nk1=0.3\nk2=0.25\nk3=0.1\n\n#Coeficients matrix for model B\n#Model B is refered as model02\ndef MakeModelMatrix02(K1,K2,K3):\n \n Matrix=np.zeros((3,3))\n\n Matrix[0,0]=-K1\n 
Matrix[0,1]=K3\n\n Matrix[1,0]=K1\n Matrix[1,1]=-(K2+K3)\n\n Matrix[2,1]=K2\n \n return Matrix\n\nMatrix02=MakeModelMatrix02(k1,k2,k3)\nInitialConditions=[5,0,0]\n\ndef KineticsSystem(InitialConditions,t):\n \n return MakeModel(Matrix02,InitialConditions)\n\nSystemSolution=odeint(KineticsSystem,InitialConditions,SolverTime)\n\n###############################################################################\n# Visualization\n###############################################################################\n\nplt.figure(3,figsize=(9,6))\n\nplt.plot(SolverTime,SystemSolution[:,0],'b-',label='[A]',path_effects=[path_effects.SimpleLineShadow(alpha=0.2,rho=0.2),\n path_effects.Normal()])\nplt.plot(SolverTime,SystemSolution[:,1],'g-',label='[B]',path_effects=[path_effects.SimpleLineShadow(alpha=0.2,rho=0.2),\n path_effects.Normal()])\nplt.plot(SolverTime,SystemSolution[:,2],'m-',label='[C]',path_effects=[path_effects.SimpleLineShadow(alpha=0.2,rho=0.2),\n path_effects.Normal()])\n\nplt.xlabel('Time',fontsize=16,fontweight='bold')\nplt.ylabel('Concentration',fontsize=16,fontweight='bold')\nplt.legend(loc=0,fontsize=14)\n\nax=plt.gca()\nPlotStyle(ax,'')\n\n###############################################################################\n# Data Generation\n###############################################################################\n\nWhiteNoise=[np.random.uniform(low=-1,high=1)/4 for val in SystemSolution[:,2]]\nWhiteSignal=MakeNoisyData(SystemSolution[:,2],WhiteNoise)\n\n###############################################################################\n# ODE fitting \n###############################################################################\n\ndef ModelSolver02(t,K1,K2,K3,InitialConditions):\n \n cK1=K1\n cK2=K2\n cK3=K3\n \n cInit=InitialConditions\n \n cMatrix=MakeModelMatrix02(cK1,cK2,cK3)\n \n def LocalModel(cInit,t):\n \n return MakeModel(cMatrix,cInit)\n \n Solution=odeint(LocalModel,cInit,t)\n \n return Solution[:,2]\n\ndef ModelSolution02(t,K1,K2,K3):\n \n return ModelSolver02(t,K1,K2,K3,InitialConditions)\n \n \nModel02Params=curve_fit(ModelSolution02,SolverTime,WhiteSignal)\n\nfK1=Model02Params[0][0]\nfK2=Model02Params[0][1]\nfK3=Model02Params[0][2]\n\nFitSolutionB=ModelSolution02(SolverTime,fK1,fK2,fK3)\n\n###############################################################################\n# Visualization\n###############################################################################\n\nplt.figure(4,figsize=(9,6))\n\n(markers, stemlines, baseline) = plt.stem(SolverTime, WhiteSignal,bottom=0,label='Data',basefmt=\" \")\nplt.setp(stemlines, linestyle=\"-\", color=\"red\", linewidth=0.5,alpha=0.5 )\nplt.setp(markers, color=\"red\",alpha=0.75 )\n\nSolutionLabel='[C]'\nplt.plot(SolverTime,FitSolutionB,'m-',label=SolutionLabel,path_effects=[path_effects.SimpleLineShadow(alpha=0.2,rho=0.2),\n path_effects.Normal()])\n \nplt.xlabel('Time',fontsize=16,fontweight='bold')\nplt.ylabel('Concentration',fontsize=16,fontweight='bold')\nplt.legend(loc=0,fontsize=14)\n\nplt.ylim(0,5.2)\n\nax=plt.gca()\nPlotStyle(ax,'')\n\n###############################################################################\n# Residuals Statistical test \n###############################################################################\n\nObRes=[signal-model for signal,model in zip(WhiteSignal,FitSolutionB)]\n\nKS=stats.ks_2samp(ObRes,WhiteNoise)\n\nprint(KS)\n","repo_name":"TavoGLC/DataAnalysisByExample","sub_path":"ParameterEstimation/Complete/ODE fitting 02.py","file_name":"ODE fitting 
02.py","file_ext":"py","file_size_in_byte":9714,"program_lang":"python","lang":"de","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"26073392166","text":"import argparse\nimport requests\nimport logging\nimport threading\nimport time\nimport random\nlogging.getLogger().setLevel(logging.INFO)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--front-end-dns\", required=True)\n args = parser.parse_args()\n\n frontend = args.front_end_dns\n\n response = requests.get(\"http://\" + frontend + \":5004/lookup/1\")\n t_start = time.time()\n response = requests.get(\"http://\" + frontend + \":5004/lookup/1\")\n t_end = time.time()\n t_diff = (t_end - t_start) * 1000\n logging.info(f\"Response time for cache lookup {t_diff}\")\n\n response = requests.post(\"http://\" + frontend + \":5004/buy/1\")\n\n t_start = time.time()\n response = requests.get(\"http://\" + frontend + \":5004/lookup/1\")\n t_end = time.time()\n t_diff = (t_end - t_start) * 1000\n logging.info(f\"Response time for cache miss lookup {t_diff}\")","repo_name":"hoang-ho/CompSci677_Lab3","sub_path":"InvalidateCacheOverhead.py","file_name":"InvalidateCacheOverhead.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15680550270","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:\n count = 0\n curr = head\n while curr:\n count += 1\n curr = curr.next\n \n if count == 1:\n return None\n if count == n:\n head = head.next\n return head\n \n index = count - n\n prev = None\n curr = head\n local_count = 0\n while curr:\n if local_count == index:\n prev.next = curr.next\n break\n local_count += 1\n prev = curr\n curr = curr.next\n return head\n ","repo_name":"Tek58/Leetcode","sub_path":"19-remove-nth-node-from-end-of-list/19-remove-nth-node-from-end-of-list.py","file_name":"19-remove-nth-node-from-end-of-list.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6189261691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 12 10:28:50 2018\n\n@author: federico nemmi\n\"\"\"\nimport os\nos.chdir(\"/home/zipat/Documents/Python Scripts/multiparametric_patrice/outcome_analyses/whole_features_set\")\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import scale\nfrom sklearn.svm import LinearSVR\nfrom sklearn.utils import shuffle\nfrom sklearn.utils import resample\nfrom sklearn.feature_selection import SelectKBest, f_regression\nfrom sklearn.preprocessing import binarize\nfrom sklearn.metrics import accuracy_score\nimport matplotlib.pyplot as plt\nfrom helper_functions_mean_ci import mean_confidence_interval\nfrom sklearn.model_selection import KFold\n\n\nwhole_data = pd.read_table(\"../../FC_SC_PCC_MPC_Cingulum_v17072018_SubRegions.csv\", decimal = \",\", sep = \",\")\nwhole_data = whole_data.dropna()\n\noutcome = whole_data.loc[:,\"CRS_R\"].values\n\n\n\nnum_data = whole_data.iloc[:,5:].values\nnum_date = scale(num_data)\ncolnames = whole_data.columns[5:]\n\n\nrandom_state = 12072018\n\nsubject_id = np.arange(num_data.shape[0])\n\n\n\nn_reps = 100\nn_reps_shuff = 1000\n\n\nk_best = [.9, .8, .7, .6, .5, .4, .3, .2, .1]\nouter_corr_coeff = {}\nouter_acc = 
{}\n\n \npreds = list()\noob_outcomes = list()\nR2 = list()\ncorr = list()\nmae = list()\nk_selected = []\nselected_features = []\n\nfor n in np.arange(n_reps):\n print(\"Advancement {}%\".format(((n+1)/n_reps)*100))\n X_res, Y_res, subj_id_res = resample(num_data, outcome, subject_id, random_state = random_state + n)\n oob_subjects = [s for s in subject_id if s not in subj_id_res]\n oob_outcome = outcome[oob_subjects]\n oob_features = num_data[oob_subjects, :]\n grid_search_results = list()\n models = list()\n \n rfk = KFold(n_splits=10, random_state = 100)\n \n cv_k_best = []\n selectors = []\n \n for val in k_best:\n cv_pred = []\n cv_outcome = []\n selector = SelectKBest(score_func = f_regression, k = round(len(colnames) * val))\n \n for train, test in rfk.split(X_res, Y_res):\n X_res_int = X_res[train,:]\n Y_res_int = Y_res[train]\n selector.fit(X_res_int, Y_res_int.ravel())\n X_res_selected = selector.transform(X_res_int)\n clf = LinearSVR()\n clf.fit(X_res_selected, Y_res_int.ravel())\n inner_preds = clf.predict(selector.transform(X_res[test,:]))\n cv_pred = cv_pred + list(inner_preds)\n cv_outcome = cv_outcome + list(Y_res[test])\n cv_k_best.append(np.mean(np.square(np.array(cv_outcome) - np.array(cv_pred))))\n selectors.append(selector)\n \n best_selector = np.array(selectors)[np.array(cv_k_best).argsort()][0]\n best_selector.fit(X_res, Y_res.ravel())\n k_selected.append(np.array(k_best)[np.array(cv_k_best).argsort()][0])\n selected_features.append(colnames[best_selector.get_support()].tolist())\n #for g in ParameterGrid(grid):\n # SVRmod = SVR(**g)\n # SVRmod.fit(X_res, Y_res)\n # Rsq = SVRmod.score(X_res, Y_res)\n # grid_search_results.append(Rsq)\n # models.append(SVRmod)\n X_res_selected_outern = best_selector.transform(X_res) \n clf_outern = LinearSVR()\n \n clf_outern.fit(X_res_selected_outern, Y_res)\n \n oob_pred = clf_outern.predict(best_selector.transform(oob_features))\n \n preds.append(oob_pred)\n \n \n oob_outcomes.append(oob_outcome)\n \n \n R2.append(clf_outern.score(best_selector.transform(oob_features), oob_outcome))\n corr.append(np.corrcoef(oob_outcome, oob_pred)[:,1][0])\n mae.append(np.mean(np.abs(oob_outcome - oob_pred)))\n\n\n\nR2_shuff = list()\ncorr_shuff = list()\nmae_shuff = list()\n\nfor n in np.arange(n_reps_shuff):\n print(\"Advancement {}%\".format(((n+1)/n_reps_shuff)*100))\n X_res, Y_res, subj_id_res = resample(num_data, outcome, subject_id, random_state = random_state + n)\n oob_subjects = [s for s in subject_id if s not in subj_id_res]\n oob_outcome = outcome[oob_subjects]\n oob_features = num_data[oob_subjects, :]\n grid_search_results = list()\n models = list()\n \n rfk = KFold(n_splits=10, random_state = 100)\n \n cv_k_best = []\n selectors = []\n \n X_res = shuffle(X_res, random_state = 100)\n \n for val in k_best:\n cv_pred = []\n cv_outcome = []\n selector = SelectKBest(score_func = f_regression, k = round(len(colnames) * val))\n \n for train, test in rfk.split(X_res, Y_res):\n X_res_int = X_res[train,:]\n Y_res_int = Y_res[train]\n selector.fit(X_res_int, Y_res_int.ravel())\n X_res_selected = selector.transform(X_res_int)\n clf = LinearSVR()\n clf.fit(X_res_selected, Y_res_int.ravel())\n inner_preds = clf.predict(selector.transform(X_res[test,:]))\n cv_pred = cv_pred + list(inner_preds)\n cv_outcome = cv_outcome + list(Y_res[test])\n cv_k_best.append(np.mean(np.square(np.array(cv_outcome) - np.array(cv_pred))))\n selectors.append(selector)\n \n best_selector = np.array(selectors)[np.array(cv_k_best).argsort()][0]\n best_selector.fit(X_res, 
Y_res.ravel())\n    \n    X_res_selected_outern = best_selector.transform(X_res) \n    clf_outern = LinearSVR()\n    \n    clf_outern.fit(X_res_selected_outern, Y_res)\n    \n    oob_pred = clf_outern.predict(best_selector.transform(oob_features))\n    \n    \n    \n    R2_shuff.append(clf_outern.score(best_selector.transform(oob_features), oob_outcome))\n    corr_shuff.append(np.corrcoef(oob_outcome, oob_pred)[:,1][0])\n    mae_shuff.append(np.mean(np.abs(oob_outcome - oob_pred)))\n\n\n\n\n\nimport pickle\n\nwith (open(\"CRS_SVR_select_k_best_independent.pkl\", \"wb\")) as f:\n    pickle.dump([R2, R2_shuff, corr, corr_shuff, mae, mae_shuff, oob_outcomes, selected_features, k_selected], f)\n\n\n\nimport pickle\n\nwith (open(\"CRS_SVR_select_k_best_independent.pkl\", \"rb\")) as f:\n    tt = pickle.load(f)\n\n\nR2 = tt[0]\nR2_shuff = tt[1]\ncorr = tt[2]\ncorr_shuff = tt[3]\nmae = tt[4]\nmae_shuff = tt[5]\nselected_features = tt[7]\n\nselected_features = [[x.replace(\"DP\", \"RD\") for x in ll] for ll in selected_features]\nselected_features = [[x.replace(\"FC_\", \"FC \") for x in ll] for ll in selected_features]\nselected_features = [[x.replace(\"_\", \" - \") for x in ll] for ll in selected_features]\nselected_features = [[x.replace(\"PCC\", \"PMC\") for x in ll] for ll in selected_features]\n\n\nmean_confidence_interval(R2)\nmean_confidence_interval(corr)\nmean_confidence_interval(mae)\n\nmean_confidence_interval(R2_shuff)\nmean_confidence_interval(corr_shuff)\nmean_confidence_interval(mae_shuff)\n\nlab_and_counts = np.unique(np.hstack(np.array(selected_features).ravel()), return_counts = True)\nfeatures_bigger_than_thr = [el for el, val in zip(lab_and_counts[0], lab_and_counts[1]) if val > n_reps * .50]\ncounts_bigger_than_thr = [val for val in lab_and_counts[1] if val > n_reps * .50]\n\nplt.hist(k_selected)\n\n\nsorted_features = np.array(features_bigger_than_thr)[np.array(counts_bigger_than_thr).argsort()[::-1]]\ncounts_bigger_than_thr.sort(reverse = True)\nplt.rcParams.update({'font.size': 30})\nplt.figure(num = 1, figsize = (18,18), dpi = 180)\nplt.bar(sorted_features, counts_bigger_than_thr)\nplt.xticks(rotation=75, horizontalalignment = \"right\")\nplt.tight_layout()\nplt.title(\"CRS_R prediction\")\nplt.savefig(\"CRS_most_selected_features.tiff\")\n\n\n((R2_shuff > np.mean(R2)) * 1).mean()\n((corr_shuff > np.mean(corr)) * 1).mean()\n((mae_shuff < np.mean(mae)) * 1).mean()\n    ","repo_name":"fnemmi-tonic/coma_discrimination_prediction_most_important_features","sub_path":"CRS_linear_SVR_lin_select_k_best_independent.py","file_name":"CRS_linear_SVR_lin_select_k_best_independent.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"46278345226","text":"import random\nimport os\nclear = lambda: os.system('clear')\n\ncards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n\ndef handle_input(user_input):\n    if user_input == \"y\":\n        return True\n    else:\n        return False\n\ndef assign_card():\n    return random.choice(cards)\n\ndef deal_hand():\n    card1 = assign_card()\n    card2 = assign_card()\n    return [card1, card2]\n\ndef calculate_hand_score(hand):\n    sum_hand = sum(hand)\n    # fix: demote aces from 11 to 1 one at a time, and only while the hand is bust;\n    # the original demoted every ace at once, scoring e.g. [11, 11] as 2 instead of 12\n    for i in range(0, len(hand)):\n        if sum_hand <= 21:\n            break\n        if hand[i] == 11:\n            hand[i] = 1\n            sum_hand -= 10\n    return sum_hand\n    \n
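# --- editor's note: a quick worked example of the ace rule above (illustrative only) ---\n# calculate_hand_score([11, 10, 5]) -> 26 is bust, so one ace is demoted: 1 + 10 + 5 = 16\n# calculate_hand_score([11, 11])    -> 22 is bust, so only ONE ace is demoted: 1 + 11 = 12\n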
def ask_to_begin():\n    return handle_input(input(\"would you like to play a hand of blackjack? 'y' = yes, 'n' = no: \"))\n\n\ndef prompt_user(user_hand, user_score, computer_hand):\n    print(f\"Your cards: {user_hand}. Current score: {user_score}\\nComputer's face up {computer_hand[1]}\")\n    if user_score == 21:\n        print(\"I suggest you stay sir\")\n    return handle_input(input(\"y to hit, n to stay: \"))\n\n\ndef play_blackjack():\n    clear()\n    print(\"\"\"Welcome to blackjack.\\n Rules:\\n\n    1. Aces will automatically be used as 11 or converted into 1 if hand goes over 21\n    2. If you both blackjack from the start, you lose.\n    3. Any other tie is a tie.\n    4. GL you will need it\"\"\")\n    user_hand = deal_hand()\n    computer_hand = deal_hand()\n    user_score = calculate_hand_score(user_hand) # fix: was sum(user_hand), which mis-scored a dealt pair of aces\n    computer_score = calculate_hand_score(computer_hand)\n    if user_score == 21 or computer_score == 21:\n        if user_score == 21 and computer_score == 21:\n            print(\"You both blackjacked but tie goes to the dealer :(\")\n        if user_score == 21:\n            print(f\"Boom, {user_hand} blackjack. You're a winner!!\")\n        elif computer_score == 21:\n            print(f\"Soz, computer blackjacked {computer_hand}. You lose :(\")\n        if ask_to_begin():\n            play_blackjack()\n        return # fix: without this return the hand kept playing after a blackjack was already settled\n\n    user_choice = prompt_user(user_hand, user_score, computer_hand)\n    while user_choice:\n        user_hand.append(assign_card())\n        user_score = calculate_hand_score(user_hand)\n        if user_score <= 21:\n            user_choice = prompt_user(user_hand, user_score, computer_hand)\n        else:\n            return print(f\"You bust with a hand of {user_hand} and a score of {user_score}, you lose!\")\n    \n    while computer_score < 16:\n        computer_hand.append(assign_card())\n        computer_score = calculate_hand_score(computer_hand)\n    \n    if computer_score > 21:\n        print(f\"Nice you win with a hand of {user_hand} and a score of {user_score}.\\nComputer busts with a hand of {computer_hand} and a score of {computer_score}.\")\n    else:\n        if computer_score == user_score:\n            return print(f\"You tied with a hand of {user_hand} and a score of {user_score}.\\nComputer had a hand of {computer_hand} also a score of {computer_score}.\")\n        elif computer_score > user_score:\n            return print(f\"You didn't gamble hard enough with a hand of {user_hand} and a score of {user_score}. The computer had a hand of {computer_hand} and a score of {computer_score}.\")\n        else:\n            return print(f\"Nice, you're a gambling legend with a hand of {user_hand} and a score of {user_score}. The computer lost with a hand of {computer_hand} and a score of {computer_score}.\")\n    # note: the stray in-function replay loop that was here was redundant; replays are handled by the module-level loop below\n\nwhile ask_to_begin():\n    play_blackjack()\n\n    \n\n","repo_name":"zevpermack/python100","sub_path":"d11/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"13578823505","text":"import sqlite3\n\ndef get_valid_integer_input(prompt):\n    while True:\n        try:\n            value = int(input(prompt))\n            return value\n        except ValueError:\n            print(\"Please enter a valid integer.\")\n\nconn = sqlite3.connect('pokemon.db')\ncursor = conn.cursor()\n\nprint(\"Pokemon Team Builder 1.0\")\nsearch_term_type = input(\"What type are you looking for? \")\nsearch_term_stats = get_valid_integer_input(\"What base stat total minimum are you looking for? \")\n\nsearch_term_type = '%' + search_term_type + '%' # Wildcards for partial matches\n\ncursor.execute(\"SELECT * FROM pokedex WHERE LOWER(type) LIKE LOWER(?) AND total > ? 
ORDER BY total DESC\", (search_term_type, search_term_stats))\nresult = cursor.fetchall()\nconn.close()\n\nfor row in result:\n print(row)\n","repo_name":"OscarMederos/utilities","sub_path":"team_planner.py","file_name":"team_planner.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71382140106","text":"import typing\nfrom typing import List, Tuple\n\n\ndef load_subtitle(filename: str) -> List[Tuple[List[str], List[str]]]:\n f = open(filename, 'rt', encoding='utf8')\n text = f.read()\n lines = text.splitlines(keepends=False)\n\n names = []\n words = []\n pages = []\n\n line_counter = 0\n for line in lines:\n if line.strip() == '' or line.strip()[0] == '#':\n continue\n\n if line.startswith('---') and line.endswith('---'):\n page_split = True\n else:\n page_split = False\n\n if page_split:\n if len(names) == len(words) == len(pages) == 0:\n words.append('')\n names.append('')\n continue\n if len(names) == 1 and len(words) == 0:\n # speak aside\n words.append(names[0])\n names[0] = ''\n pages.append((names, words))\n names = []\n words = []\n line_counter = 0\n continue\n\n\n if line_counter % 2 == 0:\n names.append(line.strip('::'))\n else:\n words.append(line)\n line_counter += 1\n if len(names) == 1 and len(words) == 0:\n # speak aside\n words.append(names[0])\n names[0] = ''\n if len(names) != 0 and len(words) != 0:\n pages.append((names, words))\n\n return pages\n\n\nif __name__ == '__main__':\n pages = load_subtitle('subtitle.txt')\n\n for page in pages:\n print('--------------')\n names, words = page\n for i in range(len(names)):\n print(names[i] + ': ' + words[i])\n","repo_name":"tianer2820/Subtitles-for-Images","sub_path":"src/subtitle_reader.py","file_name":"subtitle_reader.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42085108583","text":"import sys\nfrom typing import NoReturn, Union\n\n\nfrom .address_book import AddressBook # .address_book import AddressBook\nfrom .handlers import (\n main_handler,\n ALL_COMMAND,\n ALL_COMMAND_ADDRESSBOOK,\n ALL_COMMAND_NOTEBOOK,\n ALL_COMMAND_FILESORTER,\n)\nfrom .note_book import NoteBook\nfrom .serialization import LoadBook, OpenBook\n\n\nclass InputToParser:\n\n @staticmethod\n def listen(request='How can I help you?\\n'):\n \"\"\"Get a user string - separate the command and parameters - \n return it to the list, where the first element is the command, \n the others are parameters.\n\n Parameters:\n request (str): String line for user request.\n\n Returns:\n list command of user input (list): list of commands (list of strings).\n \"\"\"\n user_input = input(request)\n # Example: aDd BirthDay 2000-11-12 -> add~birthday~2000-11-12\n command_line = user_input.strip().replace(' ', '~').replace(' ', ' ').replace(' ', '~').lower()\n # Example: ['remove~birthday', 'change~birthday' ... ]\n all_commands = sorted([el.replace('_', '~') for el in ALL_COMMAND], key=len)[::-1]\n\n for command in all_commands:\n command = str(command) # Example: 'remove~birthday' ... 
'add~birthday'\n if (command_line.startswith(command) and len(command_line) == len(command)) or \\\n command_line.startswith(f'{command}~'): # Example: 'add~phone'\n # # Example: ['add_birthday'] + ['2000-11-12']\n return [command.replace('~', '_')] + [word for word in user_input[len(command):].split(' ') if word]\n # Example: ['unknown', 'command', 'abracadabra']\n return user_input.strip().split(' ')\n\n\nclass OutputAnswer:\n\n @staticmethod\n def show_out(user_request: list, book_instance: Union[AddressBook, NoteBook], new_path_file: str) -> bool:\n \"\"\"Show answer for the user.\n \n Parameters:\n user_request (list): List of command with parameters (user request).\n book_instance (AddressBook|NoteBook): Instance of book.\n new_path_file (str): Path of file for book save/load.\n\n Returns:\n Result for new loop (bool): Answer - Do you want to continue working?.\n \"\"\"\n bot_answer = main_handler(user_request, book_instance, new_path_file)\n\n if isinstance(bot_answer, str):\n print(bot_answer)\n\n elif isinstance(bot_answer, list):\n for volume in bot_answer:\n if volume:\n print(volume)\n input('Press Enter for next page... ')\n\n else:\n print('Something happened. Will you try again?')\n\n if 'Good bye! Have some fun and take care!' in bot_answer:\n return False \n\n return True\n\n\nclass PVA:\n \"\"\"Main personal virtual assistant class.\"\"\"\n def __init__(self) -> None:\n try:\n self.path_file = sys.argv[1]\n\n except IndexError:\n self.path_file = 'ABook.data'\n \n try:\n self.path_file_notes = sys.argv[2]\n\n except IndexError:\n self.path_file_notes = 'NoteBook.data'\n\n self.path_file = OpenBook(self.path_file).open_book()\n self.path_file_notes = OpenBook(self.path_file_notes).open_book()\n\n self.contact_dictionary, self.path_file = LoadBook(self.path_file).load_book(AddressBook)\n self.note_book, self.path_file_notes = LoadBook(self.path_file_notes).load_book(NoteBook)\n\n # self.parser = InputToParser()\n print('A personal virtual assistant welcomes you.\\nHello!\\n')\n\n def start(self) -> NoReturn:\n \"\"\"The main function of launching a helper console bot that recognize \n the commands entered from the keyboard and respond according \n to the command entered. 
Enter a command - get an answer.\n \"\"\"\n while True:\n\n user_request = InputToParser.listen()\n\n if user_request[0] in ALL_COMMAND_ADDRESSBOOK: # dict of commands\n bot_answer_result = OutputAnswer.show_out(user_request, self.contact_dictionary, self.path_file)\n elif user_request[0] in ALL_COMMAND_NOTEBOOK:\n bot_answer_result = OutputAnswer.show_out(user_request, self.note_book, self.path_file_notes)\n elif user_request[0] in ALL_COMMAND_FILESORTER:\n bot_answer_result = OutputAnswer.show_out(user_request, None, '')\n else:\n user_request = ['command_guesser'] + user_request\n bot_answer_result = OutputAnswer.show_out(user_request, None, '')\n\n if not bot_answer_result:\n break\n\n\ndef main() -> NoReturn:\n PVA().start()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DenysTantsiura/pva","sub_path":"pva/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"3105281692","text":"import xml.etree.ElementTree as ET\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\n# import all files from Annotations\npath = \"/Users/weijt606/dataset/Annotations\"\nfiles = os.listdir(path)\ndf_list = []\nfor file in files:\n xml_data = open(path+'/'+file).read()\n\n def xml2df(xml_data):\n tree = ET.XML(xml_data) # element tree, you can also use ET.fromstring\n all_records = [] # convert all record list into a dataframe\n for i, child in enumerate(tree): # from root tree to extract child tree\n record = {} # place hold for record\n for subchild in child: # iterate through the subchildren\n for subsubchild in subchild: # iterate through the child of subchilden\n record[subsubchild.tag] = subsubchild.text # extract the element from \"bnbbox and create a new dictionary key\n all_records.append(record) # append this record to all_records.\n return pd.DataFrame(all_records) #return records as DataFrame\n\n ss = {}\n df = xml2df(xml_data)\n df = df.drop_duplicates() #delete repeated data\n df = df.sort_index() # sort data as ascending\n df = df.reset_index() # sort out new oder of columns\n del df['index']\n # df = df.convert_objects(convert_numeric=True).dtypes # data type: object to int\n df = df.applymap(int) #global transform data from object to int\n\n df[\"size_x\"] = df[\"xmax\"] - df[\"xmin\"]\n df[\"size_y\"] = df[\"ymax\"] - df[\"ymin\"]\n df = df[['xmax','xmin','size_x','ymax','ymin','size_y']] #order the index as: xmax xmin size_x ymax ymin size_y\n # print df.dtypes # check data type\n df_list.append(df) # append every dataframe to df_list\n df = pd.concat(df_list, ignore_index=True) #concatenation all DataFrames to one DataFrame\n\n\nprint(df)\n\nplt.subplot(121)\n# plt.hist(df[\"size_x\"], normed=1, bins= range(0,120,5)) # probability\nplt.hist(df[\"size_x\"], bins= range(0,120,5))\n# plt.plot(df[\"size_x\"])\nplt.xlabel('size')\nplt.ylabel('scalar')\n# plt.ylabel('probability')\nplt.title('Length of BoundingBox')\n\nplt.subplot(122)\n# plt.hist(df[\"size_y\"], normed=1, bins= range(0,120,5), facecolor='red') # probability\nplt.hist(df[\"size_y\"], bins= range(0,120,5), facecolor='red')\n# plt.plot(df[\"size_y\"])\nplt.xlabel('size')\nplt.ylabel('scalar')\n# plt.ylabel('probability')\nplt.title('Height of 
BoundingBox')\n\nplt.show()\n","repo_name":"weijt606/bndbox_analyze","sub_path":"bndbox_analyze.py","file_name":"bndbox_analyze.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"74199057865","text":"from python1 import asal_mi\n\n# Let's write a function that finds the prime factors of a given number\nasal_mi(34)\n\n\ndef asal_carpan_bul(sayi):\n    asal_carpanlar = []\n    bolen = 2\n    while bolen < sayi:\n        if sayi % bolen == 0 and asal_mi(bolen):\n            asal_carpanlar.append(bolen)\n        bolen = bolen + 1\n    return asal_carpanlar\n\n\nprint(asal_carpan_bul(210))\n","repo_name":"Adem54/Python-Tutorials","sub_path":"4.Week-3/python3.py","file_name":"python3.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"13581602279","text":"import numpy as np\nimport pynbody\n\nif __name__ == '__main__':\n    # change the path to where you store the files on the local machine\n    path = '/mnt/c/Python_projects/data_test/'\n\n    sim_size = 256\n\n    # do I need to use open() for better file handling?\n    f = pynbody.load(path + 'full-data_reseed1_simulation_snapshots_IC.gadget3')\n\n    print(\"Attempting to create den_contrast_1.npy\")\n    try:\n        den_contrast = np.load(path+'den_contrast_1.npy')\n        print(\"den_contrast_1.npy already created\")\n\n    except OSError: #FileNotFoundError\n        rho_m = pynbody.analysis.cosmology.rho_M(f, unit=f[\"rho\"].units)\n        den_contrast = f['rho']/rho_m\n\n        np.save(path+'den_contrast_1.npy', den_contrast)\n        print('den_contrast_1.npy created')\n","repo_name":"flemesre/PHYS-449-final-project","sub_path":"Debug/Dataloader code/Ver 1/load_data_from_pynbody.py","file_name":"load_data_from_pynbody.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"17991359797","text":"class TreeNode(object):\n    val = None\n    size = 1\n    left = right = None\n\n    def __init__(self, val):\n        self.val = val\n\n\nclass Solution(object):\n    def countSmaller(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[int]\n        \"\"\"\n        counts = [0] * len(nums)\n        root = None\n        for i in range(len(nums) - 1, -1, -1):\n            root, counts[i] = Solution._insert(root, nums[i])\n        return counts\n\n    @staticmethod\n    def _insert(root, x):\n        if root is None:\n            return TreeNode(x), 0\n\n        if x <= root.val:\n            root.left, num_smaller = Solution._insert(root.left, x)\n        else:\n            root.right, num_smaller = Solution._insert(root.right, x)\n            num_smaller += root.size - root.right.size + 1\n        root.size += 1\n        return root, num_smaller\n","repo_name":"hosang/coding-prep","sub_path":"leetcode/315_count_of_smaller_numbers_after_self/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"33083634210","text":"import pandas as pd\nimport numpy as np\nimport pyshark \n\n\n# Create a LiveCapture object\ncap = pyshark.LiveCapture(interface='eno1') #, bpf_filter=\"tcp or dns or http or ssl or tls\")\n\n# Create an empty list to store the captured packets\npacket_list = []\nurl = \"https://www.nypost.com\"\nurl_name = \"nypost\"\n\n\n\n# Start capturing packets\nfor packet in cap.sniff_continuously():\n    \n    # Check if the packet is a TCP packet and has an IP layer\n    if 'ip' in packet:\n\n        packet_dict = {}\n\n        # Extract the fields of the IP layer\n        ip_layer = 
packet['ip']\n packet_dict['ip.src'] = ip_layer.src\n packet_dict['ip.dst'] = ip_layer.dst\n packet_dict['ip.proto'] = ip_layer.proto\n\n packet_dict['highest_layer'] = packet.highest_layer\n packet_dict['length'] = packet.length\n packet_dict['number'] = packet.number\n packet_dict['sniff_time'] = packet.sniff_time\n packet_dict['sniff_timestamp'] = packet.sniff_timestamp\n\n if 'dns' in packet:\n dns_layer = packet['dns']\n packet_dict['dns.id'] = dns_layer.id\n packet_dict['dns.flags.response'] = dns_layer.flags_response\n packet_dict['dns.qry.name'] = dns_layer.qry_name\n try: \n packet_dict['dns.resp.name'] = dns_layer.resp_name\n except:\n packet_dict['dns.resp.name'] = \"\"\n packet_dict['dns.qry.type'] = dns_layer.qry_type\n packet_dict['dns.qry.class'] = dns_layer.qry_class\n packet_dict['dns.count.queries'] = dns_layer.count_queries\n packet_dict['dns.count.answers'] = dns_layer.count_answers\n packet_dict['dns.count.authority'] = dns_layer.count_auth_rr\n packet_list.append(packet_dict)\n df = pd.DataFrame(packet_list)\n df.to_csv(f\"da_{url_name}.csv\")\n continue\n\n\n if 'tcp' in packet:\n # Extract the fields of the TCP layer\n tcp_layer = packet['tcp']\n packet_dict['tcp.srcport'] = tcp_layer.srcport\n packet_dict['tcp.dstport'] = tcp_layer.dstport\n packet_dict['tcp.flags'] = tcp_layer.flags\n packet_dict['tcp.seq'] = tcp_layer.seq\n packet_dict['tcp.ack'] = tcp_layer.ack\n\n if 'tls' in packet:\n tls_layer = packet['tls']\n packet_dict['tls'] = str(tls_layer)\n\n packet_list.append(packet_dict)\n df = pd.DataFrame(packet_list)\n df.to_csv(f\"da_{url_name}.csv\")\ncap.close()\n\n","repo_name":"hessamhz/NM-NDA","sub_path":"network-measurement/https-fingerprinting/traffic_dumper.py","file_name":"traffic_dumper.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2637364262","text":"import subprocess\r\nimport pandas as pd\r\nimport pyautogui\r\nimport time\r\nfrom datetime import datetime\r\n\r\ndef signIn(meetingid):\r\n subprocess.call(\"C:\\Program Files (x86)\\Tencent\\WeMeet\\wemeetapp.exe\")\r\n time.sleep(3)\r\n\r\n join_btn = pyautogui.locateCenterOnScreen('join_button.png')\r\n pyautogui.moveTo(join_btn)\r\n pyautogui.click()\r\n time.sleep(1)\r\n\r\n meeting_id_btn = pyautogui.locateCenterOnScreen('meeting_id.png')\r\n pyautogui.moveTo(meeting_id_btn)\r\n pyautogui.click()\r\n pyautogui.write(meetingid)\r\n time.sleep(3)\r\n\r\n media_btn = pyautogui.locateAllOnScreen('Media_button.png')\r\n for btn in media_btn:\r\n pyautogui.moveTo(btn)\r\n pyautogui.click()\r\n time.sleep(1)\r\n\r\n join_btn = pyautogui.locateCenterOnScreen('enter_button.png')\r\n pyautogui.moveTo(join_btn)\r\n pyautogui.click()\r\n time.sleep(5)\r\n\r\n close_button = pyautogui.locateCenterOnScreen('close_button.png')\r\n pyautogui.moveTo(close_button)\r\n pyautogui.click()\r\n time.sleep(3)\r\n\r\n close_button2 = pyautogui.locateCenterOnScreen('close_button2.png')\r\n pyautogui.moveTo(close_button2)\r\n pyautogui.click()\r\n time.sleep(3)\r\n\r\n# df = pd.read_excel('timings.xlsx')\r\n\r\n# while True:\r\n# # To get current time\r\n# now = datetime.now().strftime(\"%H:%M\")\r\n# if now in str(df['Timings']):\r\n# mylist = df[\"Timings\"]\r\n# mylist = [i.strftime(\"%H:%M\") for i in mylist]\r\n# c = [i for i in range(len(mylist)) if mylist[i] == now]\r\n# row = df.loc[c]\r\n# meeting_id = str(row.iloc[0, 1])\r\n\r\n# time.sleep(5)\r\n# signIn(meeting_id)\r\n# time.sleep(2)\r\n# print('signed 
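in')\r\n#\r\n# Note on the sketch above: checking `now in str(df['Timings'])` tests the current HH:MM against the\r\n# string repr of the whole column, so any row that merely contains that substring matches, and the loop\r\n# spins without sleeping. A hedged alternative (same spreadsheet, meeting id taken positionally as above):\r\n#\r\n# timings = {t.strftime('%H:%M'): str(m) for t, m in zip(df['Timings'], df.iloc[:, 1])}\r\n# while True:\r\n#     now = datetime.now().strftime('%H:%M')\r\n#     if now in timings:\r\n#         signIn(timings[now])\r\n#         break\r\n#     time.sleep(20)\r\n#\r\n# print('signed 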
in')\r\n# break\r\n\r\n#signIn('123131231')\r\n","repo_name":"GaoFan98/login_meeting_script","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21945159454","text":"#Mathematical Libraries\nimport numpy as np\nimport mpmath as mp\nimport math\nfrom decimal import Decimal as D\n\n#Scipy\nimport scipy.special as sc\nfrom scipy.integrate import quad\nfrom scipy.optimize import fsolve\nfrom scipy.interpolate import interp1d\nfrom scipy.interpolate import UnivariateSpline\nfrom scipy.integrate import solve_ivp\n\n#Plotting Library\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n#Miscellaneous Libraries\nimport time\nfrom IPython.display import clear_output\nimport csv\nimport copy\n\n\n\n#~~~~~~~~~~~~~~~~~~~~~Class definition: PopIII stars~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nclass PopIIIStar:\n '''Describes important parameters of a population III star,\n Units:\n M - Solar\n R - Solar\n L - Solar\n Tc - Kelvin (K)\n rhoc - g/cm^3\n life_star - years'''\n def __init__(self, M = 0, R = 0, L = 0, Tc = 0, rhoc = 0, life_star = 0):\n self.mass = M\n self.radius = R\n self.lum = L\n self.core_temp = Tc\n self.core_density = rhoc\n self.lifetime = life_star\n\n #Calculates stellar volume\n def get_vol(self):\n vol = (4/3) * np.pi * (self.radius*6.96e10)**3 #in cm^3\n\n return vol\n\n def get_num_density(self):\n mn_grams = 1.6726e-24\n M = 1.9885e33 * self.mass\n\n n_baryon = 0.75*M/mn_grams * 1/(self.get_vol())\n\n return n_baryon\n\n def get_mass_grams(self):\n M_gram = 1.9885e33 * self.mass\n return M_gram\n\n def get_radius_cm(self):\n R_cm = self.radius*6.96e10\n return R_cm\n\n def get_vesc_surf(self):\n G = 6.6743*10**(-8) #cgs units\n M = self.get_mass_grams()\n R = self.get_radius_cm()\n Vesc = np.sqrt(2*G*M/R) # escape velocity(cm/s)\n return Vesc\n\n#####################################################################################\n#Stellar params\nM100 = PopIIIStar(100, 10**0.6147, 10**6.1470, 1.176e8, 32.3, 10**6)\nM300 = PopIIIStar(300, 10**0.8697, 10**6.8172, 1.245e8, 18.8, 10**6)\nM1000 = PopIIIStar(1000, 10**1.1090, 10**7.3047, 1.307e8, 10.49, 10**6)\nstars_list = (M300, M1000)\n\n#####################################################################################\n#Capture rate function as defined in IZ2019 which returns array of Cns up to a cutoff as well as Ctot\n#Uses decimal data type for greater accuracy (indicated by : D(variable))\n#Much faster to use for pure-hydrogen stars than Multi-component code\n\n#Function: captureN_pureH - Calculates an array of partial capture rates [C1, C2, C3, ..., C_Ncutoff] up to a cutoff\n# condition as well as the total capture rate (The sum of all partial capture rates) up to\n# a cutoff.\n#\n#Inputs: M - Mass of Pop III star in Solar masses\n# R - Radius of star in solar masses\n# Mchi - Mass of DM particle in GeV\n# rho_chi - Ambient DM Density in GeV/cm^3\n# vbar - DM Dispersion velocity, in cm/s\n# sigma_xenon - Accepts either True (Which indicates the use of XENON1T Bounds on sigma) OR\n# value of sigma you want to use if not using XENON1T Bounds on sigma\ndef captureN_pureH(star, Mchi, rho_chi, vbar, sigma_xenon):\n\n #Converting inputs to Decimal data type for greater accuracy\n M = D(star.mass)\n R = D(star.radius)\n Mchi = D(Mchi)\n rho_chi = D(rho_chi)\n vbar = D(vbar)\n\n # Defining Constants\n G = D(6.6743*10**(-8)) # gravitational constant in cgs units\n mn = D(0.93827) # 
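proton rest mass\n    #Decimal note: the partial rates C_N span an enormous dynamic range and the stopping rule below compares successive totals at the 0.1% level, which is round-off sensitive in float64; hence the Decimal arithmetic throughout this routine.\n    # 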
mass of nucleons (protons) in star in GeV\n mn_grams = D(1.6726*10**(-24)) # mass of nucleon in grams\n\n #Converting Stellar properties to different units\n R = D(6.96e10)*R # convert radius to centimeters\n M = D(1.9885*(10**33))*M # convert mass to grams\n\n #Calculating important Qtys. dependent on stellar parameters\n Vesc = D(np.sqrt(2*G*M/R)) # escape velocity(cm/s)\n Vstar = D(4/3) * D(np.pi) * (R**3) # Volume of Star (cm^3)\n n = (D(0.75)*M/(mn_grams))/(Vstar) # Number density of hydrogen in star\n\n #Number density of DM particles\n nchi = rho_chi/Mchi # number density(cm^{-3})\n\n #Condition specifying cross-section to be used: True means X1T bound. Value means we use that value\n if (sigma_xenon == True):\n sigma = D(1.26*10**(-40))*(Mchi/D(10**8)) # DM cross section XIT\n else:\n if(type(sigma_xenon) == np.ndarray):\n sigma = D(sigma_xenon[0])\n else:\n sigma = D(sigma_xenon)\n\n # calculate Beta (reduced mass)\n beta = (4*(Mchi)*mn)/((Mchi+mn)**2)\n\n # Optical Depth, tau\n tau = 2 * R * sigma * n\n\n #Initialize Partial Capture rate and total capture rate as an empty list and populate first two elements\n # as place-holders\n Cn = [1, 1]\n Ctot = [1, 1]\n\n #Initialize N = 1\n N = 1\n\n #Counts how many times Cn is less than the previous Cn. A way to implement a cutoff condition\n less_count = 0\n\n #Loop runs until cutoff conditions met.\n #Cutoff conditions: If CN < C_N-1 a certain number of times (less_count) AND Ctot_N is within 0.1% of Ctot_N-1\n #This is a way to ensure the sum has converged without adding unnecessary terms\n\n while ((less_count <= 10 or abs(Ctot[N]/Ctot[N-1] - 1) > 0.001)):\n\n # increase N by 1 each iteration, calculating for a new Capture Rate.\n #NOTE: This means we start calculating at N = 2. BUT, we are essentially calculating for N-1 in each iteration\n # You will note this is the case when calculating VN, for example, where we use N-1 instead of N.\n # We do this because it is easier to compare the N capture rate with the previous capture rate.\n N += 1\n\n # caluclate p_tau, probability of N scatters\n pn_tau = D(2/tau**2)*D(N)*D(sc.gammainc(float(N+1),float(tau)))\n\n # calculate V_N, velocity of DM particle after N scatters\n VN = Vesc*D(1-((beta)/2))**D((-1*(N-1))/2)\n\n #FULL Partial capture rate equation, no approximations\n Cn_temp = D(np.pi)*(R**2)*pn_tau*((D(np.sqrt(2))*nchi)/(D(np.sqrt(3*np.pi))*vbar))*((((2*vbar**2)+(3*Vesc**2))-((2*vbar**2)+(3*VN**2))*(np.exp(-1*((3*((VN**2)-(Vesc**2)))/(2*(vbar**2)))))))\n\n #Populating partial cpature rate array\n Cn.append(Cn_temp)\n\n #Starts adding partial capture rates from Ctot[1], so there is an extra 1 added to capture rate. We subtract this\n # from the return value\n Ctot.append(Ctot[N-1]+Cn_temp)\n\n #Less_count condition summed. 
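This increments each time C_N comes out below C_{N-1}.\n        #Spelled out, the loop exits only once C_N has decreased more than 10 times in total AND the running sum Ctot_N agrees with Ctot_{N-1} to within 0.1%, i.e. the series has effectively converged.\n        #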
Look at cutoff condition\n if(Cn[N] < Cn[N-1]):\n less_count += 1\n\n #Remove first two place-holder elements\n Cn.pop(0)\n Cn.pop(0)\n\n #Returns list: [Cn list, Ctot]\n return Cn, Ctot[-1]-1\n\n#Inputs: Star - PopIII star\n# mx - Mass of DM particle in GeV\n# rhox - Ambient DM Density in GeV/cm^3\n# vbar - DM Dispersion velocity, in cm/s\n# sigma - DM scattering cross-section in cm^2\n\n\ndef capture_regionI(mx, star, rhox, vbar, sigma):\n cap = 5.3e28 * (rhox/10**14) * sigma/(1.26e-40) * ((10**8)/mx)**2 * ((10**6)/vbar)**3 * star.mass**3 * star.radius**(-2)\n return cap\n\ndef capture_regionII(mx, star, rhox, vbar):\n cap = 8e43 * (rhox/10**14) * (10**2/mx) * (10**6/vbar) * star.mass * star.radius\n return cap\n\ndef capture_regionIII(mx, star, rhox, vbar, sigma):\n cap = 5.4e38 * rhox/(10**14) * sigma/(1.26e-40) * (10**2/mx) * (10**6/vbar) * star.mass**2 * star.radius**(-1)\n return cap\n\ndef capture_regionIV(mx, star, rhox, vbar, sigma):\n cap = capture_regionI(mx, star, rhox, vbar, sigma)\n return cap\n\n\ndef capture_analytic(mx, star, rhox, vbar, sigma):\n #Finding parameters defining regions to determine which analytic equation to choose\n sig_tau1 = 0.5 * star.get_radius_cm()**-1 * star.get_num_density()**-1\n mx_k1 = 3 * 0.938 * star.get_vesc_surf()**2/vbar**2\n tau = 2 * star.get_radius_cm() * sigma * star.get_num_density()\n k = 3 * 0.938 * star.get_vesc_surf()**2/(mx * vbar**2)\n\n\n if((sigma >= sig_tau1) and (k*tau <= 1)):\n cap = capture_regionI(mx, star, rhox, vbar, sigma)\n elif((sigma >= sig_tau1) and (k*tau > 1)):\n cap = capture_regionII(mx, star, rhox, vbar)\n elif((sigma < sig_tau1) and (mx <= mx_k1)):\n cap = capture_regionIII(mx, star, rhox, vbar, sigma)\n else:\n cap = capture_regionIV(mx, star, rhox, vbar, sigma)\n\n return cap\n\n# Calculates upper bounds on the DM-nucleon scattering cross-section, sigma, as a function of DM\n# and stellar parameters.\n#\n# NOTE: VERY IMPORTANT --> This code is not perfect and is limited in finding bounds for DM densities\n# below ~ 10^16 GeV/cm^3 and above ~ 10^19 GeV/cm^3 (I have not yet figured out why yet, if you have\n# any clue Id love to hear!!). However, there is sort of a workaround with this. If you look at the\n# most recent papers we put out, you will find a scaling relationship like LDM ~ Ctot ~ rho_chi, where\n# rho_chi is the ambient DM density. Because this is the case, the bounds that we place on sigma end\n# up scaling like sigma ~ 1/rho_chi. Intuitively it makes sense, because higher densities will naturally\n# lead to greater capture, which has the effect of tightening the bounds (Look at the discussions in the\n# most recent paper or just HMU if that doesn't make sense). SOO, my ultimate point is that I suggest\n# you get the bounds for 10^19 GeV/cm^3 for example and than you can just multiply by factors of 10 to\n# get the bounds for different densities. For example, if i get bounds for rho_chi = 10^19 GeV/cm^3 and\n# I want bounds for rho_chi = 10^15 GeV/cm^3, I just multiply the bounds on sigma by a factor of 10^5\n# (or to be more technical, divide by a factor of 10^-5).\n\n\n# Function: sigma_mchi_pureH\n\n# Description: The way this code works is to make guesses for sigma that will produce a capture rate\n# to make a given Pop III star eddington-limited. 
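\n#\n#             An illustrative call of the analytic capture rate defined above (the numbers are\n#             arbitrary assumptions chosen only to fix the units -- Mchi in GeV, rho_chi in GeV/cm^3,\n#             vbar in cm/s, sigma in cm^2 at the XENON1T scaling):\n#\n#                 cap = capture_analytic(1e2, M300, 1e14, 1e6, 1.26e-46)\n#                 print('%.3e captures per second' % cap)\n#\n# 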
This involves guessing values of sigma,\n# calculating the corresponding DM capture rate from that value of sigma (and the other params)\n# and then comparing it to the capture rate pushing the star to the eddington limit.\n# To compare the guessed capture rate, I imposed the artificial condition (after much guess work)\n# as Ctot_guess/Ctot_Edd <= 10^0.004. Note this has been recast through logarithms in the while loop.\n# After a given sigma is guessed, the capture rates are compared using the condition described\n# above, and if it is within the range then we guessed right! If not, we find a value I call \"Rate\"\n# which just tells me how far off the guess is, and then we use that rate value to adjust our value of\n# sigma to be closer to the possibly correct value. This basically keeps going until a guess is found\n# OR seemingly forever (MAKE SURE TO READ THE NOTE ABOVE).\n\n\n# Input: M - Mass of star in solar masses\n# R - Radius of star in solar radii\n# L - Stellar Luminosity in solar luminosities\n# Mchi - mass of DM particle in GeV\n# rho_chi - ambient DM density in GeV/cm^3\n# vabr - DM dispersion velocity in cm/s\n\ndef sigma_mchi_pureH(star, Mchi, rho_chi, vbar, t): #Takes M, R and L in solar units\n\n #Fraction of annihilations in luminosity\n f = 1\n\n #Solar luminosity in erg/s\n Lsun = 3.846e33;\n\n #Convert luminosity to erg/s from solar luminosities\n L = star.lum*Lsun\n\n #Convert DM mass to Ergs\n Mchi_erg = Mchi * (1/624.15)\n\n #Calculating Eddington Luminosity for given Stellar Mass, See Eq. 1.5 of companion\n LeddFactor = 3.7142e4;\n Ledd = Lsun*star.mass*LeddFactor\n\n #DM Capture rate for measuring a star of mass M shining at eddington limit due to additional DM luminosity\n #See Eq. 1.4 of companion paper\n Ctot_atEdd = (Ledd - L)/(f*Mchi_erg)\n\n #First guess for sigma based on Xenon1T bounds\n sigma = (1.26*10**(-40))*(Mchi/(10**8))\n\n\n #First guess for Ctot\n Ctot = float(captureN_pureH(star, Mchi, float(rho_chi), vbar, sigma)[1])\n #Ctot = F_capture(Mchi, star, float(rho_chi), vbar, sigma, t)\n\n #Sigma is found when log(Ctot_num) - log(Ctot_true) becomes less than stipulated\n # See Eq. 2.2-2.3 of companion paper for conditions\n while(abs(np.log10(Ctot) - np.log10(Ctot_atEdd)) > 0.004):\n\n #Rate at which sigma is multiplied/divided by to get closer and closer to true Capture Rate\n #See Eq. 2.4 of companion paper\n rate = abs(np.log10(Ctot) - np.log10(Ctot_atEdd))*10\n\n #Tells whether divide or multiply by rate depending on if our guess is too big or too small\n if (Ctot/Ctot_atEdd > 1):\n sigma = sigma * (1/rate)\n else:\n sigma = sigma * rate\n\n # Recalculates new guess for Ctot\n Ctot = float(captureN_pureH(star, Mchi, float(rho_chi), vbar, sigma)[1])\n #Ctot = F_capture(Mchi, star, float(rho_chi), vbar, sigma, t)\n\n return sigma\n\n\ndef sigma_Nx(star, mchi, rho_chi, vbar, t):\n\n #Nx Limit\n Nx_limit = (5*10**48)*(mchi/10**3)**-3\n\n #First guess for sigma based on Xenon1T bounds\n sigma = (1.26*10**(-40))*(mchi/(10**8))\n\n\n #First guess for Nx\n #Nx = float(Nx_t_diff(mchi, rho_chi, vbar, sigma, star, t))\n Nx = float(Nx_analytic(mchi, rho_chi, vbar, sigma, star, t))\n\n #Sigma is found when log(Ctot_num) - log(Ctot_true) becomes less than stipulated\n # See Eq. 2.2-2.3 of companion paper for conditions\n while(abs(np.log10(Nx) - np.log10(Nx_limit)) > 0.004):\n\n #Rate at which sigma is multiplied/divided by to get closer and closer to true Capture Rate\n #See Eq. 
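2.4 of the companion paper.\n        #Caution: rate = 10*|log10(Nx) - log10(Nx_limit)| is a contraction only while the mismatch exceeds 0.1 dex; below that the factor drops under 1 and the multiplicative update reverses direction, so termination relies on a step landing inside the 0.004 dex window.\n        #See Eq. 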
2.4 of companion paper\n rate = abs(np.log10(Nx) - np.log10(Nx_limit))*10\n\n #Tells whether divide or multiply by rate depending on if our guess is too big or too small\n if (Nx/Nx_limit > 1):\n sigma = sigma * (1/rate)\n else:\n sigma = sigma * rate\n\n # Recalculates new guess for Ctot\n #Nx = float(Nx_t_diff(mchi, rho_chi, vbar, sigma, star, t))\n Nx = float(Nx_analytic(mchi, rho_chi, vbar, sigma, star, t))\n\n print('sigma: ' +str(sigma))\n\n return sigma\n\ndef sigma_Nx_Ca(star, mchi, rho_chi, vbar, t):\n\n #Nx Limit\n Nx_limit = (5*10**48)*(mchi/10**3)**-3\n\n #First guess for sigma based on Xenon1T bounds\n sigma = (1.26*10**(-40))*(mchi/(10**8))\n\n E = 0\n\n\n #First guess for Nx\n Nx = float(Nx_t_diff_Ca(mchi, rho_chi, vbar, sigma, star, t))\n\n #Sigma is found when log(Ctot_num) - log(Ctot_true) becomes less than stipulated\n # See Eq. 2.2-2.3 of companion paper for conditions\n while(abs(np.log10(Nx) - np.log10(Nx_limit)) > 0.004):\n\n #Rate at which sigma is multiplied/divided by to get closer and closer to true Capture Rate\n #See Eq. 2.4 of companion paper\n rate = abs(np.log10(Nx) - np.log10(Nx_limit))\n\n #Tells whether divide or multiply by rate depending on if our guess is too big or too small\n if (Nx/Nx_limit > 1):\n sigma = sigma * (1/rate)\n else:\n sigma = sigma * rate\n\n # Recalculates new guess for Ctot\n Nx = float(Nx_t_diff_Ca(mchi, rho_chi, vbar, sigma, star, t))\n\n return sigma\n\n\n\ndef rhoSigma_mchi_pureH(M, R, L, Mchi, vbar, smallRv, smallTaus):\n\n #Solar Luminosity in erg/s\n Lsun = 3.846e33\n f = 2/3\n f_hy = 0.75\n\n #Convert luminosity to GeV/s from solar luminosities\n L = L*Lsun*624.15\n Lnuc = L\n\n LeddFactor = 3.7142e4\n Ledd = Lsun*M*LeddFactor*624.15\n\n G = 6.6743e-8 # gravitational constant in cgs units\n R = (6.96e10)*R # convert radius to centimeters\n M = 1.9885e33*M # convert mass to grams\n vesc = np.power(2*G*M/R, 1/2) #Calculates Escape Velocity in cm/s\n m_p = 0.93827 # mass of protons in star in GeV\n m_p_grams = 1.6726*10**(-24) # mass of proton in grams\n\n #Calculate beta and \n beta_hy = (4*(Mchi)*m_p)/((Mchi+m_p)**2)\n z_avg_hy = 0.5\n\n if (not smallRv):\n if (smallTaus):\n rhoSig = np.sqrt(3 * np.pi/2) * vbar * (Ledd - Lnuc) * m_p_grams / (f_hy*f * M * ( 3 * vesc**2))\n\n else:\n if (smallTaus):\n rhoSig = np.sqrt(2 * np.pi/3) * vbar**3 * (Ledd - Lnuc) * m_p_grams / (f_hy*3 * f * M * beta_hy * z_avg_hy *(1 + beta_hy * z_avg_hy) * vesc**4)\n\n else:\n rhoSig = 2 * np.sqrt(2 * np.pi/3) * vbar**3 * (Ledd - Lnuc) * m_p_grams / (f_hy*3 * f * M * beta_hy * z_avg_hy *(2 + 5 * beta_hy * z_avg_hy) * vesc**4)\n\n\n return rhoSig\n\n\ndef rhoSigma_mchi_pureH_T(M, R, L, Mchi, vbar):\n #Solar Luminosity in erg/s\n Lsun = 3.846e33\n f = 2/3\n f_hy = 0.75\n\n #Convert luminosity to GeV/s from solar luminosities\n L = L*Lsun*624.15\n Lnuc = L\n\n LeddFactor = 3.7142e4\n Ledd = Lsun*M*LeddFactor*624.15\n\n G = 6.6743e-8 # gravitational constant in cgs units\n R = (6.96e10)*R # convert radius to centimeters\n M = 1.9885e33*M # convert mass to grams\n vesc = np.power(2*G*M/R, 1/2) #Calculates Escape Velocity in cm/s\n m_p = 0.93827 # mass of protons in star in GeV\n m_p_grams = 1.6726*10**(-24) # mass of proton in grams\n\n #Calculate beta and \n beta_hy = (4*(Mchi)*m_p)/((Mchi+m_p)**2)\n z_avg_hy = 0.5\n\n\n rhoSig = np.sqrt(np.pi/6) * R * vbar**5 * (Ledd - Lnuc) * Mchi / (f_hy*f * G * M**2 * (3 * vbar**2 * vesc**2))\n\n #rhoSig =\n\n return rhoSig * (1.78 * 10**-24)\n\ndef rho_mchi_pureH(M, R, L, Mchi, vbar, sigma):\n Lsun = 3.846e33;\n\n 
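#Same inversion scheme as sigma_mchi_pureH above, but solved for the ambient density: guess rho_chi, recompute the full capture rate, and scale the guess by the log-distance factor until Ctot matches the Eddington-limited target to within 0.004 dex.\n\n    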
#Convert luminosity to erg/s from solar luminosities\n L = L*Lsun\n\n #Convert DM mass to Ergs\n Mchi_erg = Mchi * (1/624.15)\n\n #Calculating Eddington Luminosity for given Stellar Mass\n LeddFactor = 3.7142e4;\n Ledd = Lsun*M*LeddFactor\n\n #DM Capture rate for measuring a star of mass M shining at eddington limit due to additional DM luminosity\n Ctot_atEdd = D((Ledd - L)/((2/3)*Mchi_erg))\n\n #First guess for rho_chi\n rho_chi = 10**19\n\n #First guess for Ctot\n Ctot = captureN_pureH(M, R, Mchi, float(rho_chi), vbar, sigma)[1]\n\n #Sigma is found when log(Ctot_num) - log(Ctot_true) becomes less than stipulated\n while(abs(np.log10(Ctot) - np.log10(Ctot_atEdd)) > 0.004):\n\n #Rate at which sigma is multiplied/divided by to get closer and closer to true Capture Rate\n rate = abs(np.log10(Ctot) - np.log10(Ctot_atEdd))*10\n\n #Tells whether divide or multiply by rate depending on if our guess is too big or too small\n if (Ctot/Ctot_atEdd > 1):\n rho_chi = rho_chi * (1/rate)\n else:\n rho_chi = rho_chi * rate\n\n\n # Recalculates new guess for Ctot\n Ctot = captureN_pureH(M, R, Mchi, float(rho_chi), vbar, sigma)[1]\n\n return float(rho_chi)\n\n\ndef rho_mchi_pureH_SA(M, R, L, Mchi, vbar, sigma, smallRv, smallTau):\n\n # Solar Luminosity in erg/s\n Lsun = 3.846e33\n\n #Calculating Eddington Luminosity for given Stellar Mass (in Solar masses) in GeV/s\n LeddFactor = 3.7142e4\n Ledd = Lsun*M*LeddFactor*624.15\n\n #Convert luminosity to GeV/s from solar luminosities\n L = L*Lsun*624.15\n\n # Define some constants\n G = 6.6743*10**(-8) # gravitational constant in cgs units\n mp = 0.93827\n mp_grams = 1.6726*10**(-24) # mass of nucleon in grams\n f = 2/3 #Fraction of DM energy useful as deposited energy to star\n z = 0.5 #average kinematic variable\n\n #Conversions\n R = 6.96e10*R # convert radius to centimeters\n M = 1.9885*(10**33)*M # convert mass to grams\n Mchi_grams = (1.782 * 10**(-24)) * Mchi\n\n #Escape Velocity\n vesc = np.sqrt(2*G*M/R) # escape velocity(cm/s)\n\n\n # If true is passed to sigma, we use XENON1T 1-year bounds\n if (sigma == True):\n sigma = 1.26*10**(-40)*(Mchi/10**8) # DM cross section XIT\n\n #Reduced mass\n beta = (4*(Mchi)*mp)/((Mchi+mp)**2)\n\n #SA Expressions depedning on limiting regimes\n if (smallTau):\n if (not smallRv):\n rho = np.sqrt(3 * np.pi/2) * mp_grams * (Ledd - L) * vbar * (1/(f*M*sigma)) * (1/(2 * vbar**2 + 3 * vesc**2))\n else:\n rho = np.sqrt(2 * np.pi / 27) * mp_grams * (Ledd - L) * vbar**3 * (1/(f*M*sigma)) * (1/(vesc**4)) * (1/(z*beta*(1+z*beta)))\n else:\n rho = np.sqrt(np.pi/6) * Mchi_grams**3 * (Ledd - L) * R * vbar**5 * (1/(G * f * M**2 * sigma)) * (1/(27 * mp_grams**2 * vesc**4 - Mchi_grams**2 * (4 * vbar**4 - 6 * vbar**2 * vesc**2)))\n\n\n return rho\n\n#######################################################################################\n#Calculating Ca and tau_eq\n\n#Retrieves solution to laneEmden n=3\ndef retrieve_LaneEmden():\n xis = []\n theta_arr = []\n with open('Lane_Emden.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n xis.append(float(row[0]))\n theta_arr.append(float(row[1]))\n\n return (xis, theta_arr)\n\n# Solution to laneEmden Equation\nxis, theta_arr = retrieve_LaneEmden()\n# interpolating points for theta function\ntheta = UnivariateSpline(xis, theta_arr, k = 5, s = 0)\n#FITTING FUNCTION FOR theta**3\ntheta_cube = UnivariateSpline(xis, np.array(theta_arr)**3, k = 5, s = 0)\n\n#Density at center of polytrope\ndef polytrope3_rhoc(star):\n\n #Getting stellar params\n Mstar = 
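star.get_mass_grams()\n    #For the n = 3 polytrope, xi_1 ~ 6.8969 and theta'(xi_1) ~ -0.04243, so the rhoc_poly expression below comes out to about 54.18 times the mean density 3*Mstar/(4*pi*Rstar**3) -- a handy sanity check.\n    Mstar = 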
star.get_mass_grams() #grams\n Rstar = star.get_radius_cm() #cm\n\n #x-intercept of the theta function\n xi_1 = xis[-1]\n\n #Slope of laneEmden at Theta = 0\n deriv_xi1 = theta.derivatives(xis[-1])[1]\n\n #Central polytropic density as per n=3 polytropic model\n rhoc_poly = (-1/(4*np.pi)) * ((xi_1/Rstar)**3) * (Mstar/(xi_1**2)) * (deriv_xi1)**-1 #g/cm^3\n\n return rhoc_poly\n\n#Polytropic potential\ndef potential_poly(xi, star):\n G = 6.6743*10**(-8) # gravitational constant in cgs units\n\n phi_xi = 4*np.pi*G*(polytrope3_rhoc(star))*(star.get_radius_cm()/xis[-1])**2 * (1 - theta(xi)) #cgs units\n\n return phi_xi\n\n#Retrieves tau(mx) from stored data\ndef retrieve_tau(star):\n mx = []\n tau = []\n with open('tau_mx_M%i.csv'%star.mass) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n mx.append(float(row[0]))\n tau.append(float(row[1]))\n\n return (mx, tau)\n\ntau_fit_funcs = []\n#Tau fits and function that will give output based on mx and star\nfor star in stars_list:\n mx_tau_fit, tau_temp = retrieve_tau(star)\n tau_fit_funcs.append(UnivariateSpline(mx_tau_fit, tau_temp, k = 5, s = 0))\n\ndef tau_fit(mx, star): #Returns tau from fitting function based on star and dm mass\n if(mx > 100):\n tau_val = 1\n else:\n## if(star.mass == 100):\n## tau_val = tau_fit_funcs[0](mx)\n if(star.mass == 300):\n tau_val = tau_fit_funcs[0](mx)\n elif(star.mass == 1000):\n tau_val = tau_fit_funcs[1](mx)\n else:\n tau_val = 1\n return tau_val\n\n#Isotropic DM distribution using potential from n=3 polytrope\ndef nx_xi(mx, xi, star): #Normalized\n\n kb = 1.380649e-16 #Boltzmann constant in cgs Units (erg/K)\n\n #Finding Tx using Temperature function\n Tx = tau_fit(mx, star) * 10**8 #K\n\n #mx in g\n mx_g = mx*1.783e-24\n\n #Numerical DM number density profile for each DM mass (normalized)\n nx_xi_val = np.exp(-mx_g*potential_poly(xi, star)/(kb*Tx))\n\n return nx_xi_val\n\ndef Ca_321(mx, star):\n #sigv^2 given by thermal freezeout\n sigv = 10**3/(mx**3)\n sigv_cg = sigv * (1.52e24) * (5.06e13)**(-6) #Convert to cm^6/s\n\n #Defining top and bottom integrands using Fully polytropic approximation\n def integrand_top_Ca(xi, mx, star):\n return 4*np.pi*(star.get_radius_cm()/xis[-1])**3 * (polytrope3_rhoc(star)*0.75/1.6726e-24) * sigv_cg * xi**2 * nx_xi(mx, xi, star)**2 * theta(xi)**3\n def integrand_bottom_Ca(xi, mx, star):\n return 4*np.pi*(star.get_radius_cm()/xis[-1])**3 * xi**2 * nx_xi(mx, xi, star)\n\n #Integrate over star\n return quad(integrand_top_Ca, 0, xis[-1], args=(mx, star))[0]/quad(integrand_bottom_Ca, 0, xis[-1], args=(mx, star))[0]**2\n\n#Equilibration timescale -- 2DM + 1SM interactions\ndef tau_eq_321(mx, star, rho_chi, vbar, sigma_xenon = False):\n #Switch for which sigma to use\n if (sigma_xenon == True):\n sigma = 1.26*10**(-40)*(mx/10**8)\n else:\n sigma = sigma_xenon\n\n #Calculating the DM capture rate\n C = float(captureN_pureH(star, mx, rho_chi, vbar, sigma)[1])\n #C = float(capture_analytic(mx, star, rho_chi, vbar, sigma))\n\n\n #Annihlation coefficient\n Ca = Ca_321(mx, star)\n\n\n #Equilibration timescale\n tau_eq = (C * Ca)**(-1/2)\n\n return tau_eq\n\n######################################################################################\n#Evaporation Rate calculated with polytropes from Ian's code\n\nmchi_300M_dat = []\nE_300M_dat = []\nwith open('E_300M_Madison.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n mchi_300M_dat.append(float(row[0]))\n E_300M_dat.append(float(row[1]))\n\nmchi_1000M_dat = 
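[]\n\n#Closed-form cross-check (an added sketch, not part of the original pipeline): for a 2->2 channel,\n#dN/dt = C - Ca*N**2 with N(0) = 0 integrates to N(t) = sqrt(C/Ca)*tanh(t/tau_eq) with tau_eq = 1/sqrt(C*Ca),\n#so N_chi saturates at sqrt(C/Ca) once t >> tau_eq -- the same limit N_chi_func_22 returns further down.\ndef Nx_22_closed_form(C, Ca, t):\n    tau_eq = (C*Ca)**(-1/2)\n    return np.sqrt(C/Ca)*np.tanh(t/tau_eq)\n\nmchi_1000M_dat = 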
[]\nE_1000M_dat = []\nwith open('E_1000M_Madison.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n mchi_1000M_dat.append(float(row[0]))\n E_1000M_dat.append(float(row[1]))\n\n\n#Approximate DM Evaporation rate\ndef evap_coeff_Ilie_approx2(mx, sigma, star):\n #Central proton number density (cm^-3)\n nc = polytrope3_rhoc(star)*0.75/1.6726e-24\n\n #Edge of star in xis\n xi1 = xis[-1]\n\n #Central proton speed (Normalized to vesc)\n uc = proton_speed(xis[0], star)\n\n vesc = 1\n vesc_val = star.get_vesc_surf()\n\n #Dimensionless QTYs\n tau = tau_fit(mx, star)#Normalized DM Temperature\n mu = mx/0.93827 #Normalized DM mass\n\n #Analytic form of the DM evaporation rate\n E = 9/np.sqrt(np.pi) * 1/(xi1**3) * sigma * nc * uc *vesc_val * np.exp(-1/uc**2 * mu/tau * (1 + xi1/2)) * star.get_vol()/Vj_Eff_SP(star,mx , 1)\n\n return E\n\n#l(r), most probable dimensionless velocity of protons at specific point in star\ndef proton_speed(xi, star):\n kb = 1.380649e-16 #Boltzmann constant in cgs Units (erg/K)\n Tc = 10**8 #Central star temperature taken to be ~ 10^8 K\n\n u = np.sqrt(2*kb*Tc*theta(xi)/1.6726219e-24) #cm/s (cgs units)\n\n l = u/star.get_vesc_surf()\n\n return l\n\n\n######################################################################################\n#Calculating kappa\n\ndef kappa_evap321(mx, sigma, star, rho_chi, vbar, E):\n #Evaporation rate from csv files\n## if star.mass == 300:\n## E = E_300M_dat\n## elif star.mass == 1000:\n## E = E_1000M_dat\n## else:\n## print(\"star mass must be 300 or 1000 solar masses\")\n\n #Equilibration timescale\n tau_eq = tau_eq_321(mx, star, rho_chi, vbar, sigma, sigma_xenon = False)\n\n #Definition of Kappa parameter\n kap = (1 + (E*tau_eq/2)**2)**(1/2)\n\n return kap\n\n######################################################################################\n#Calculating Nchi\n\ndef N_chi_func_32(mx, sigma, star, rho_chi, vbar, E):\n\n tau_eq = tau_eq_321(mx, star, rho_chi, vbar, sigma, sigma_xenon = False)\n C_tot = capture_analytic(mx, star, rho_chi, vbar, sigma)\n C_a = Ca_321(mx, star)\n k = kappa_evap321(mx, sigma, star, rho_chi, vbar, E)\n\n N_chi = ((C_tot/C_a)**(1/2))*(1/(k + ((1/2)*E*tau_eq)))\n\n return N_chi\n\n############################################################################################################################################\n#Functions for WIMP DM\n\n\n#Constants Definition\nkb = 1.380649e-16 #Boltzmann constant in cgs Units (erg/K)\nG = 6.6743*10**(-8) # gravitational constant in cgs units\nhbar = 1.05e-27 #hbar in SI units\nc = 3e10 #Speed of light in SI units\n\n\n#Calculates effective radius of DM core --> SPERGEL: https://ui.adsabs.harvard.edu/abs/1985ApJ...294..663S/abstract\ndef rx_Eff_SP(star, mx):\n #T_central ~ 10^8 K\n T = 10**8\n\n #Taking central density from polytropic prescription for consistency\n rhoc = polytrope3_rhoc(star)\n mx_g = mx * 1.783e-24 #Converting GeV/c^2 to g\n\n #Effective radius in cm\n rx = np.sqrt(9 * kb * T * 1/(4*np.pi*G*rhoc*mx_g) )\n\n return rx\n\n#Effective volumes, analytic form --> SPERGEL: https://ui.adsabs.harvard.edu/abs/1985ApJ...294..663S/abstract\ndef Vj_Eff_calcFunction_SP(R, j, rx):\n #Vj in cm^3\n Vj = (2 * np.pi * rx**2 / ( (9) * (j**(3/2))) ) * ( ( -6 * np.exp(-3 * j * R**2 / (2*(rx**2))) * np.sqrt(j) * R) + ( np.sqrt(6*np.pi) * rx * sc.erf(np.sqrt(3*j/2) * R/rx) ) )\n\n return Vj\n\n#Effective volumes, analytic form --> SPERGEL: https://ui.adsabs.harvard.edu/abs/1985ApJ...294..663S/abstract\ndef Vj_Eff_SP(star, mx, j):\n 
R = star.get_radius_cm() #converting R to cm\n rx = rx_Eff_SP(star, mx)\n\n #Vj in cm^3\n Vj = Vj_Eff_calcFunction_SP(R, j, rx)\n\n return Vj\n\n#Lower bounds on sigmaV due to demand that equilibriation time is much less than the age of the star\n#For sigma, we have the choice of using the bounds we place OR the XENON1T bounds\n#Ann_type: 22 = 2-->2 annihilation\n# 32 = 3-->2 annihilation (3 DM --> 2 DM)\n# 321 = 3-->2 annihilation (2DM + SM --> SM + DM)\ndef sigV_lowerBound(star, frac_life, mx, rho_chi, vbar, sigma, Ann_type): #Using effective volumes\n\n #Determining which value to use for sigma\n## if (sigma_xenon == True):\n## sigma = 1.26*10**(-40)*(mx/10**8)\n## else:\n## #rho_adjust = 10**19/rho_chi\n## #sigma = sigma_mchi_pureH(star, mx, 10**19, vbar) * rho_adjust\n## sigma = sigma_xenon\n\n #Calculating the DM capture rate\n C = float(captureN_pureH(star, mx, rho_chi, vbar, sigma)[1])\n #C = float(capture_analytic(sigma, star, mx, rho_chi, vbar))\n\n #Calculating effective volumes using SPERGEL method\n V1 = Vj_Eff_SP(star, mx, 1)\n V2 = Vj_Eff_SP(star, mx, 2)\n V3 = Vj_Eff_SP(star, mx, 3)\n\n #Lower bound depends on form of annihilations\n if (Ann_type == 22):\n #Calculating lower bound in sigmaV for 2-->2 annihilations (See FREESE: arXiv:0802.1724v4)\n Veff_22 = V1**2 * V2**-1\n sigmav_lower = (frac_life * (star.lifetime)*31556952)**-2 * C**-1 * Veff_22 #Units of cm^3/s\n elif(Ann_type == 32):\n #Calculating lower bound in sigmaV for 3-->2 annihilations,(3 DM --> 2 DM)\n Veff_32 = np.sqrt(V1**3/V3) # (See Exoplanet, Leane: arXiv:2010.00015)\n sigmav_lower = (Veff_32**2 * C**-1 * (frac_life * (star.lifetime)*31556952)**-2) * ((5.06e13)**6 * (1.52e24)**-1) #Natural units: GeV^-5\n\n elif(Ann_type == 321):\n #22 effective volumes\n Veff_22 = V1**2 * V2**-1 # (See Exoplanet, Leane: arXiv:2010.00015)\n\n #Number density of SM particles in star\n n_sm = polytrope3_rhoc(star)/1.6726e-24\n\n #Lower bound in sigmaV for 3-->2 annihilation (2DM + SM --> SM + DM)\n sigmav_lower = (C**-1 * (frac_life * (star.lifetime)*31556952)**-2 * Veff_22 * n_sm**-1) * (5.06e13)**6 * (1.52e24)**-1 #Natural units: GeV^-5\n\n return sigmav_lower\n\n#Isotropic DM distribution using potential from n=3 polytrope\ndef nx_r(mx, r, star): #Normalized\n xi = 6.81 * r/star.get_radius_cm()\n kb = 1.380649e-16 #Boltzmann constant in cgs Units (erg/K)\n Tx = tau_fit(mx, star) * 10**8 #K\n #mx in g\n mx_g = mx*1.783e-24\n\n #Numerical DM number density profile for each DM mass (normalized)\n nx_xi_val = np.exp(-mx_g*potential_poly(xi, star)/(kb*Tx))\n\n return nx_xi_val\n\n#Annihilation coefficient -- 2-->2\ndef Ca_22(mx, star, rho_chi, vbar, sigma):\n #sigv given by lower bounds\n #sigv = sigV_lowerBound(star, 0.01, mx, rho_chi, vbar, sigma, 22)\n sigv = 3*10**-26\n\n radius = star.get_radius_cm()\n thermal_radius = ((9*kb*star.core_temp)/(8*G*np.pi*star.core_density*mx*1.78*10**-24))**(1/2)\n #print(thermal_radius)\n \n\n #Defining top and bottom integrands using Fully polytropic approximation\n def integrand_top_Ca(xi, mx, star):\n return 4*np.pi*(thermal_radius/xis[-1])**3 * sigv * xi**2 * nx_xi(mx, xi, star)**2\n\n def integrand_bottom_Ca(xi, mx, star):\n return 4*np.pi*(thermal_radius/xis[-1])**3 * xi**2 * nx_xi(mx, xi, star)\n\n\n def integrand_top_Ca_cgs(r, mx, star):\n return 4*np.pi * sigv * r**2 * nx_r(mx, r, star)**2\n def integrand_bottom_Ca_cgs(r, mx, star):\n return 4*np.pi * r**2 * nx_r(mx, r, star)\n\n #print(integrand_top_Ca(xis[-1], mx, star))\n #print(integrand_bottom_Ca(xis[-1], mx, star))\n\n if 
mx <= 10**3:\n        Ca = quad(integrand_top_Ca_cgs, 0, thermal_radius, args=(mx, star))[0]/quad(integrand_bottom_Ca_cgs, 0, thermal_radius, args=(mx, star))[0]**2\n        #Ca = 10**(1.2*np.log(mx)-56.2)\n    else:\n        Ca = quad(integrand_top_Ca_cgs, 0, thermal_radius, args=(mx, star))[0]/quad(integrand_bottom_Ca_cgs, 0, thermal_radius, args=(mx, star))[0]**2\n        #Ca = 10**(1.2*np.log(mx)-56.2)\n\n    #NB: thermal_radius above carries 8*pi in the denominator where rx_Eff_SP uses 4*pi, so the two core radii differ by a factor of sqrt(2).\n\n    #Ca = quad(integrand_top_Ca_cgs, 0, star.get_radius_cm(), args=(mx, star))[0]/quad(integrand_bottom_Ca_cgs, 0, star.get_radius_cm(), args=(mx, star))[0]**2\n\n    #print(\"Ca: \" + str(Ca))\n\n    #Integrate over star\n    return Ca\n\n#Equilibration timescale -- 2-->2\ndef tau_eq_22(mx, star, rho_chi, vbar, sigma):\n    #Switch for which sigma to use\n##    if (sigma_xenon == True):\n##        sigma = 1.26*10**(-40)*(mx/10**8)\n##    else:\n##        sigma = sigma_xenon\n\n    #Calculating the DM capture rate\n    C = float(captureN_pureH(star, mx, rho_chi, vbar, sigma)[1])\n    #C = float(capture_analytic(mx, star, rho_chi, vbar, sigma))\n\n    #Annihilation coefficient\n    Ca = Ca_22(mx, star, rho_chi, vbar, sigma)\n\n    #Equilibration timescale\n    tau_eq = (C * Ca)**(-1/2)\n\n    return tau_eq\n\ndef kappa_evap22(mx, sigma, star, rho_chi, vbar, E):\n\n    tau_eq = tau_eq_22(mx, star, rho_chi, vbar, sigma)\n\n    kap = (1 + (E*tau_eq/2)**2)**(1/2)\n\n    return kap\n\ndef N_chi_func_22(mx, sigma, star, rho_chi, vbar, E):\n\n    tau_eq = tau_eq_22(mx, star, rho_chi, vbar, sigma)\n    #C_tot = float(captureN_pureH(star, mx, rho_chi, vbar, sigma)[1])\n    C_tot = float(capture_analytic(mx, star, rho_chi, vbar, sigma))  #arguments follow the capture_analytic(mx, star, rhox, vbar, sigma) signature\n    C_a = Ca_22(mx, star, rho_chi, vbar, sigma)\n    k = kappa_evap22(mx, sigma, star, rho_chi, vbar, E)\n\n    N_chi = ((C_tot/C_a)**(1/2))#*(1/(k + ((1/2)*E*tau_eq)))\n\n    return N_chi\n\n################################################################################################################\n#Effective Volume Section\n\n#Defining integrand for effective volumes\ndef integrand_Vj_poly3(xi, j, mx, star):\n    return xi**2 * (nx_xi(mx, xi, star))**j\n\n#Numerically integrating to get effective volumes for polytropes\ndef Vj_poly3(j, mx, star):\n    xi_1 = xis[-1]\n    factor = 4*np.pi*(star.get_radius_cm()/xi_1)**3 #Outside integral factor\n    int_val = quad(integrand_Vj_poly3, 0, xi_1, args=(j, mx, star)) #Integrate nx/nc * xi**2 over star\n    Vj = factor * int_val[0] #cm^3\n    return Vj\n\n\n#################################################################################################################\n#Functions from 1012.2039 Paper\n\n#Critical cross section for Sun-like star\ndef sigcrit_sunlike(star, t):\n\n    sigcrit = (4*10**-36)*(star.radius**2)*(star.mass**-1)#*(1/(1-(t/star.lifetime)))\n\n    return sigcrit\n\n#Probability of at least one scattering of WIMP in Sun-like star\ndef f_sunlike(sigma, star, t):\n\n    f = 0.89*(sigma/sigcrit_sunlike(star, t))\n\n    return f\n\n#Maximum energy of the WIMP per WIMP mass that can lead to a capture\ndef E0(mx, star):\n\n    E0 = 2*G*(1.67*10**(-24))*(1/(mx*1.78*10**-24))*(star.get_mass_grams()/star.get_radius_cm())  #1.67e-24 g is the proton mass, giving E0 = 2*(G*M/R)*(m_p/m_chi)\n\n    return E0\n\n#WIMP Capture Rate\ndef F_capture(mx, star, rho_chi, vbar, sigma, t):\n\n    #NB: the (1/mx**-3) factor below evaluates to mx**3\n    F = (1.1*10**27)*(rho_chi/0.3)*(2.2*10**7/vbar)*(1/mx**-3)*(star.mass)*(star.radius)*(1-np.exp(-3*E0(mx, star)/vbar**2))*f_sunlike(sigma, star, t)\n\n    return F\n\n#Calculates total number of DM particles in the star at a given time, t\ndef Nx_t_diff(mx, rho_chi, vbar, sigma, star, t_1):\n\n    #relevant parameters\n    Ctot = F_capture(mx, star, rho_chi, vbar, sigma, t_1) #s^-1\n    #Ctot = float(captureN_pureH(star, mx, float(rho_chi), vbar, sigma)[1])\n    #NB: Ctot is per second and t_1 is used directly as the integration limit, so t_1 must be supplied in seconds; Nx_analytic below instead takes t in years and converts.\n\n    #Differential equation function\n    dNxdt = lambda t, Nx, Ctot = Ctot: Ctot\n\n    #Nx(t)\n    sol = solve_ivp(dNxdt, (0, t_1), [0], t_eval = [t_1])\n\n    #Nx_t1 = # Of DM particles at t1\n    Nx_t1 = sol.y[0][0]\n\n    return Nx_t1\n\ndef Nx_analytic(mx, rho_chi, vbar, sigma, star, t):\n\n    #relevant parameters\n    Ctot = F_capture(mx, star, rho_chi, vbar, sigma, t) #s^-1\n    #Ctot = float(captureN_pureH(star, mx, float(rho_chi), vbar, sigma)[1])\n\n    t_s = t * 3.154*10**7  #years -> seconds\n\n    Nx_t = Ctot * t_s\n\n    return Nx_t\n\n\n#Calculates total number of DM particles in the star at a given time, t\ndef Nx_t_diff_Ca(mx, rho_chi, vbar, sigma, star, t_1):\n\n    #relevant parameters\n    #Ctot = float(captureN_pureH(star, mx, float(rho_chi), vbar, sigma)[1]) #s^-1\n    Ctot = F_capture(mx, star, rho_chi, vbar, sigma, t_1)\n    Ca = Ca_22(mx, star, rho_chi, vbar, sigma)\n    #E = evap_coeff_Ilie_approx2(mx, sigma, star)\n\n    #Differential equation function; evaporation is neglected here (see the commented evap_coeff line above)\n    dNxdt = lambda t, Nx, Ctot = Ctot, Ca = Ca: Ctot - Ca*Nx**3\n\n    #Nx(t)\n    sol = solve_ivp(dNxdt, (0, t_1), [0], t_eval = [t_1])\n\n    #Nx_t1 = # Of DM particles at t1\n    Nx_t1 = sol.y[0][0]\n\n    print(Nx_t1)\n\n    return Nx_t1\n\n\n################################################################################################################\n#Functions from Sebastian Ellis Paper\n\ndef self_gravitation_cond(star, mx):\n\n    #converting to grams to get final mass in g, use for k = 1\n    cm_conversion = 2.84e37\n    s_conversion = 8.53e47\n    K_conversion = 6.52e36\n\n    #boltzmann constant in cgs units\n    k = 1.3807e-16\n\n    G = 6.6743e-8\n\n    #converting GeV to g\n    mchi = mx / 5.62e23\n\n    #m_acc = np.sqrt(3/(np.pi*(cm_conversion**3)*star.core_density))*((K_conversion*(cm_conversion**3)*star.core_temp)/((s_conversion**2)*G*mchi))**(3/2) #naturalized cgs units\n    m_acc = np.sqrt( 3/(np.pi*(star.core_density)) )*(( (k*(star.core_temp)) / (G*mchi) )**(3/2)) #k constant cgs units\n    #m_acc = ((3/(3.14*30))**(1/2)) * (((10**(-16)*10**7)/(10**(-8)*5.62*10**(-23)))**(3/2))\n\n    #print(m_acc)\n\n    return m_acc #in g\n\ndef chandrasekhar_mass(mx):\n\n    #converting GeV to g\n    mchi = mx / 5.62e23\n\n    m_planck = 2.176434e-5 #in grams\n    m_ch = (m_planck**3)/(mchi**2) #in grams\n\n    return m_ch\n\ndef kaup_mass(mx):\n\n    #converting GeV to g\n    mchi = mx / 5.62e23\n\n    m_planck = 2.176434e-5 #in grams\n    m_k = 0.633*((m_planck**2)/mchi) #in grams\n\n    return m_k\n\ndef DM_Mass_Capture(star, Mchi, rho_chi, vbar, sigma_xenon):\n\n    #capture = float(captureN_pureH(star, Mchi, rho_chi, vbar, sigma_xenon))\n    capture = float(capture_analytic(Mchi, star, rho_chi, vbar, sigma_xenon))\n    #capture rate [1/s] times the DM mass; Mchi is in GeV here, so mcap is a GeV/s rate (M_core_asymmetric further down does the grams conversion instead)\n    mcap = capture * Mchi\n\n    print(mcap)\n\n    return mcap\n\ndef Bondi_Rate(star):\n\n    #schematic Bondi form ~ G**2*M**2*rho/c**3; note star.mass enters in solar units here\n    mbondi = 4*np.pi*((G**2*star.mass**2)/c**3)*star.core_density\n\n    print(mbondi)\n\n    return mbondi\n\ndef Hawking_Radiation(star, Mchi, rho_chi, vbar, sigma_xenon, t_1):\n\n    T_H = 1/(8*np.pi*G*M_BH(star, Mchi, rho_chi, vbar, sigma_xenon, t_1))\n    #mh = (np.pi**2/30)*g_eff*T_H**4*(4*np.pi*rx_Eff_SP(star, Mchi)**2)\n    mh = (np.pi**2/60)*T_H**4*(4*np.pi*rx_Eff_SP(star, Mchi)**2)\n    print(mh)\n\n    return mh\n\ndef M_BH(star, Mchi, rho_chi, vbar, sigma_xenon, t_1):\n\n    mcap = DM_Mass_Capture(star, Mchi, rho_chi, vbar, sigma_xenon)\n    mbondi = Bondi_Rate(star)\n    #mh = Hawking_Radiation(star, Mchi, rho_chi, vbar, sigma_xenon, t_1)  #Hawking_Radiation calls M_BH, so evaluating it here would recurse without a base case; the loss term is likewise omitted from dMBHdt below\n\n    print(mcap)\n    print(mbondi)\n\n    #Differential equation function (Hawking loss term omitted)\n    dMBHdt = lambda t, MBH, mcap = mcap, mbondi = mbondi: mcap + mbondi\n\n    #Nx(t)\n    sol = solve_ivp(dMBHdt, (0, t_1), [0], t_eval = [t_1])\n\n    #Nx_t1 = 
# Of DM particles at t1\n MBH_t1 = sol.y[0][0]\n\n print(MBH_t1)\n\n return MBH_t1\n\ndef M_core_asymmetric(Mchi, star, rho_chi, vbar, sigma_xenon):\n \n mx = Mchi / 5.62e23\n \n cap = float(capture_analytic(Mchi, star, rho_chi, vbar, sigma_xenon))\n mass = cap * mx * star.lifetime * 3.15e7\n \n return mass\n\ndef sigma_BH(star, Mchi, rho_chi, vbar): #Takes M, R and L in solar units\n\n\n #Self-gravitating mass limit\n M_sg = self_gravitation_cond(star, Mchi)\n\n #First guess for sigma based on Xenon1T bounds\n sigma = (1.26*10**(-40))*(Mchi/(10**8))\n\n\n #First guess for M_DM\n M_DM = M_core_asymmetric(Mchi, star, rho_chi, vbar, sigma)\n\n #Sigma is found when log(Ctot_num) - log(Ctot_true) becomes less than stipulated\n # See Eq. 2.2-2.3 of companion paper for conditions\n while(abs(np.log10(M_DM) - np.log10(M_sg)) > 0.004):\n\n #Rate at which sigma is multiplied/divided by to get closer and closer to true Capture Rate\n #See Eq. 2.4 of companion paper\n rate = abs(np.log10(M_DM) - np.log10(M_sg))*10\n\n #Tells whether divide or multiply by rate depending on if our guess is too big or too small\n if (M_DM/M_sg > 1):\n sigma = sigma * (1/rate)\n else:\n sigma = sigma * rate\n\n # Recalculates new guess for M_DM\n M_DM = M_core_asymmetric(Mchi, star, rho_chi, vbar, sigma)\n \n\n return sigma\n\n\n################################################################################################################\n#Plotting\n\n#~~~~~~~~ Stellar PARAMS ~~~~~~~~~~~~~~~~~\n\n#Stellar Data\nL = np.power(10,[6.8172, 7.3047])\nM = [300, 1000]\nR = np.power(10,[0.8697, 1.1090])\n\n\n#~~~~~~~~ DM PARAMS ~~~~~~~~~~~~~~~~~\n\nvbar = 10**6\nrho_chi_sigV = 10**14\nann_type = 22 #2-->2 annihilations\n\nrho_chi_list = [10**13, 10**16]\n\n\n#Fraction of star's lifetime for equilibration\nfrac_tau = 0.01\n\n#Using lower bounds on sigv throughout\nunitary = False\nthermal = True\n\n\n#Orders of magnitude from 10^19 to get Densities\nrho_chi_adjust = [10**6, 10**3]\n\n\n#~~~~~~~~~~~ WD PARAMS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSun = PopIIIStar(1, 1, 1, 1.5*10**7, 150, 10**10)\nWD = PopIIIStar(1, .015, 10**-2, 10**5, 10**6, 10**10)\n\n#~~~~~~~~~~~~~~~~ CALCULATING POP III BOUNDS ON SIGMA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nplottype = input(\"Enter 'pop iii' for black hole exclusion with Pop III star parameters.\\nEnter 'wd' for a reproduction of the black hole exclusion from 1012.2039.\\n\" +\n \"Enter 'Nx' to look at Nx values using 1012.2039 functions.\\nEnter 'fig 1' for fig 1 replicated by reading data from the original plot.\\n\" +\n \"Enter 'Nx pop iii' for Nx values attained by Pop III stars.\\nEnter 'final' for polished Nx Pop III exclusion plots.\\n\" +\n \"Enter 'Nx comp' for a comparison of differential vs, analytic functions for nonannihilating DM.\\n\" +\n \"Enter 'ellis params' to implement BH mass accretion rates from Sebastian Ellis.\\n\" + \n \"Enter 'ellis sigma' for a sigma exclusion plot using Ellis parameters.\\n\\n\")\n\nif plottype == 'pop iii':\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n\n\n E = 0\n\n mchi_dat = np.logspace(0, 4, 10)\n rho_chis = [10**13, 10**16]\n rho_adjust = [10**6, 10**3]\n sigma = [[],[]]\n mchi = [[],[]]\n\n sd = [[],[]]\n\n\n #Looping over all Stellar Masses\n for i in range(0, len(rho_chis)):\n\n #Color formatting of plot\n colors = palette(i/len(rho_chis))\n area_color = list(colors)\n area_color[3] = 0.2\n\n\n #Looping over all DM masses\n for k in range(0, len(mchi_dat)):\n\n 
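#sigma is solved once at a reference density of 10**19 GeV/cm^3 and then rescaled by rho_adjust, using the sigma ~ 1/rho_chi scaling of these bounds described in the long note above sigma_mchi_pureH.\n            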
print('working on mx = ' + str(mchi_dat[k]))\n\n temp = sigma_Nx_Ca(M300, mchi_dat[k], 10**19, vbar, 10**5) * rho_adjust[i]\n #Nx = N_chi_func_22(mchi_dat[k], temp, M300, rho_chis[i], vbar, 0)\n Nx = Nx_t_diff_Ca(mchi_dat[k], rho_chis[i], vbar, temp, M300, 10**5)\n\n\n #if Nx > (5*10**48)*(mchi_dat[k]/10**3)**-3:\n\n sigma[i].append(temp)\n mchi[i].append(mchi_dat[k])\n\n sd[i].append(10**-39 * mchi_dat[k])\n\n\n #Plotting\n plt.plot(mchi[i], sigma[i], color = area_color, label = '$\\\\rho_\\chi$ = ' + str(rho_chis[i]))\n plt.plot(mchi_dat, sd[i], color = area_color, label = '$\\\\rho_\\chi$ = ' + str(rho_chis[i]))\n\n\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #print(mchi)\n #print(sigma)\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n plt.xlim(mchi_dat[0], mchi_dat[-1])\n #plt.ylim(10**-50, 10**-30)\n plt.ylabel('$\\sigma$ [$cm^{2}$]', fontsize = 15)\n plt.title('Reproduction of Figure 1 from 1012.2039 to Fit Pop III Stars')\n plt.legend(loc = 'best', ncol = 2) \n plt.savefig('fig1_popiii_reproduce.png', dpi = 200)\n plt.show()\n\n\nelif plottype == 'wd':\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n\n\n E = 0\n\n mchi_dat = np.logspace(1, 5, 20)\n rho_chis = [10**3, 10**4]\n rho_adjust = [10**16, 10**15]\n sigma = [[],[]]\n mchi = [[],[]]\n\n sd = [[],[]]\n\n\n #Looping over all Stellar Masses\n for i in range(0, len(rho_chis)):\n\n #Color formatting of plot\n colors = palette(i/len(rho_chis))\n area_color = list(colors)\n area_color[3] = 0.2\n\n\n #Looping over all DM masses\n for k in range(0, len(mchi_dat)):\n\n temp = sigma_Nx(Sun, mchi_dat[k], 10**19, vbar, 10**10) * rho_adjust[i]\n Nx = Nx_analytic(mchi_dat[k], rho_chis[i], vbar, temp, Sun, 10**10)\n\n\n sigma[i].append(temp)\n mchi[i].append(mchi_dat[k])\n\n sd[i].append(10**-39 * mchi_dat[k])\n\n\n #Plotting\n plt.plot(mchi[0], sigma[0], color = 'dimgray', label = '$\\\\rho_\\chi$ = $10^{3}$')\n plt.plot(mchi[1], sigma[1], color = 'dimgray', ls = '--', label = '$\\\\rho_\\chi$ = $10^{4}$')\n\n plt.plot(mchi_dat, sd[0], color = area_color, label = 'SD Direct Detection Bound')\n plt.fill_between(mchi_dat, sd[0], 10**-10, color = 'dimgray')\n\n\n slope, intercept = np.polyfit(np.log(mchi[0]), np.log(sigma[0]), 1)\n print(\"slope: \" + str(slope))\n print(\"intercept: \" + str(intercept))\n\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #print(mchi)\n #print(sigma)\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n plt.xlim(mchi_dat[0], mchi_dat[-1])\n plt.ylim(plt.ylim()[0], 10**-20)\n plt.ylabel('$\\sigma$ [$cm^{2}$]', fontsize = 15)\n plt.title('Reproduction of Figure 1 from 1012.2039 with a Typical WD ')\n plt.legend(loc = 'best', ncol = 2)\n plt.savefig('fig1_wd_reproduce.png', dpi = 200)\n plt.show()\n\n\nelif plottype == 'Nx':\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n\n\n E = 0\n\n timespan = np.logspace(0, 10, 60)\n rho_chis = [10**3, 10**4]\n sigma = 10**-42\n mchi = 10**4\n\n Nx = [[],[]]\n Nx_limit = np.full(60, (5*10**48)*(mchi/10**3)**-3)\n\n\n\n #Looping over all Stellar Masses\n for i in range(0, len(rho_chis)):\n\n #Color formatting of plot\n colors = palette(i/len(rho_chis))\n area_color = list(colors)\n area_color[3] = 0.2\n\n\n #Looping over all DM masses\n for k in range(0, len(timespan)):\n\n 
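#Nx grows linearly in time at a fixed capture rate, while the dashed Nx_limit = 5e48*(mchi/10**3)**-3 curve is the critical particle number adopted from 1012.2039; crossing it signals collapse to a black hole.\n            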
Nx[i].append(Nx_t_diff(mchi, rho_chis[i], vbar, sigma, WD, timespan[k]))\n\n\n\n #Plotting\n plt.plot(timespan, Nx[i], color = area_color, label = '$\\\\rho_\\chi$ = ' + str(rho_chis[i]))\n\n\n plt.plot(timespan, Nx_limit, color = 'k', ls = '--', label = '$N_\\chi$ Limit')\n\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #print(mchi)\n #print(sigma)\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('time [yrs]', fontsize = 15)\n plt.xlim(timespan[0], timespan[-1])\n #plt.ylim(10**-50, 10**-30)\n plt.ylabel('$N_\\chi$', fontsize = 15)\n plt.title('Reproduction of $N_\\chi$ Values from 1012.2039')\n plt.legend(loc = 'best', ncol = 2) \n plt.savefig('nx_reproduce.png', dpi = 200)\n plt.show()\n\n\nelif plottype == 'fig 1':\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('plasma')\n\n\n\n mchi1 = []\n sig1 = []\n with open('fig1_read_data.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n mchi1.append(float(row[0]))\n sig1.append(float(row[1]))\n\n mchi2 = []\n sig2 = []\n with open('fig1_read_data2.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n mchi2.append(float(row[0]))\n sig2.append(float(row[1]))\n\n mchi_dat = np.logspace(0, 9, 60)\n rho_chis = [10**3, 10**4]\n rho_adjust = [10**16, 10**15]\n sd = []\n\n\n #Looping over all DM Masses\n for i in range(0, len(mchi_dat)):\n\n sd.append(10**-39 * mchi_dat[i])\n\n for i in range(0, len(mchi1)):\n\n mchi1[i] = 10**mchi1[i]\n sig1[i] = 10**sig1[i]\n\n\n for i in range(0, len(mchi2)):\n\n mchi2[i] = 10**mchi2[i]\n sig2[i] = 10**sig2[i]\n\n\n #Plotting\n plt.plot(mchi1, sig1, color = 'r', ls = '--', label = '$\\\\rho_\\chi$ = $10^{4}$' )\n plt.plot(mchi2, sig2, color = 'r', label = '$\\\\rho_\\chi$ = $10^{3}$')\n plt.plot(mchi_dat, sd, color = 'b', label = 'SD Direct Detection Bounds')\n\n\n slope, intercept = np.polyfit(np.log(mchi1), np.log(sig1), 1)\n print(\"slope of px = 10^4: \" + str(slope))\n print('intercept: ' + str(intercept))\n\n slope2, intercept2 = np.polyfit(np.log(mchi2), np.log(sig2), 1)\n print(\"slope of px = 10^3: \" + str(slope2))\n print('intercept: ' + str(intercept2))\n\n\n########################################################################################################\n#\n# SLOPE OF BH EXCLUSIONS:\n# 10^3 --> -1.0074196757915095\n# 10^4 --> -1.0146510395831208\n#\n########################################################################################################\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n plt.xlim(mchi_dat[0], mchi_dat[-1])\n #plt.ylim(10**-42, 10**-30)\n plt.ylabel('$\\sigma$ [$cm^{2}$]', fontsize = 15)\n plt.title('Reproduction of Figure 1 from 1012.2039')\n plt.legend(loc = 'best', ncol = 2)\n plt.savefig('fig1_read_data_plotted.png', dpi = 200)\n plt.show()\n\n\n\nelif plottype == 'Nx pop iii':\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n\n\n E = 0\n\n timespan = np.logspace(0, 6, 50)\n rho_chis = [10**13, 10**16]\n sigma = 10**-42\n mchi = 10**4\n mx = np.logspace(1, 5, 50)\n\n Nx = [[],[]]\n Nx_eq = []\n Nx_limit = np.full(50, (5*10**48)*(mchi/10**3)**-3)\n\n\n\n #Looping over all Stellar Masses\n for i in range(0, len(rho_chis)):\n\n #Color formatting of plot\n colors = 
palette(i/len(rho_chis))\n area_color = list(colors)\n area_color[3] = 0.2\n\n\n #Looping over all DM masses\n for k in range(0, len(timespan)):\n\n Nx[i].append(Nx_t_diff(mchi, rho_chis[i], vbar, sigma, M300, timespan[k]))\n\n\n #Plotting\n plt.plot(timespan, Nx[i], color = area_color, label = '$\\\\rho_\\chi$ = ' + str(rho_chis[i]))\n\n\n plt.plot(timespan, Nx_limit, color = 'k', ls = '--', label = '$N_\\chi$ Limit')\n\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #print(mchi)\n #print(sigma)\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('time [yrs]', fontsize = 15)\n plt.xlim(timespan[0], timespan[-1])\n #plt.ylim(10**-50, 10**-30)\n plt.ylabel('$N_\\chi$', fontsize = 15)\n plt.title('$N_\\chi$ Values Over Time in Pop III Stars')\n plt.legend(loc = 'best', ncol = 2) \n plt.savefig('nx_popiii.png', dpi = 200)\n\n\n #~~~~~~~~~~~~~~~~~ FIG 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n Nx_limit = []\n\n for i in range(0, len(mx)):\n\n Nx_eq.append(N_chi_func_22(mx[i], sigma, M300, 10**14, vbar, 0))\n Nx_limit.append((5*10**48)*(mx[i]/10**3)**-3)\n\n plt.plot(mx, Nx_eq, color = 'r', label = '$N_\\chi$ at Equilibrium')\n plt.plot(mx, Nx_limit, color = 'k', ls = '--', label = '$N_\\chi$ Limit')\n\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #print(mchi)\n #print(sigma)\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n plt.xlim(mx[0], mx[-1])\n #plt.ylim(10**-50, 10**-30)\n plt.ylabel('$N_\\chi$', fontsize = 15)\n plt.title('$N_\\chi$ Values at Equilibrium in Pop III Stars')\n plt.legend(loc = 'best', ncol = 2) \n plt.savefig('nx_mx_popiii.png', dpi = 200)\n plt.show()\n\n\nelif plottype == 'final':\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n\n #~~~~~~~~ DM PARAMS ~~~~~~~~~~~~~~~~~\n\n #Definition of DM mass ranges\n mchi_xenon = np.logspace(2.9, 15, 16)\n mchi_nf = np.logspace(2.9, 15, 16)\n mchi_pico = np.logspace(2.9, 15, 16)\n mchi = np.logspace(1, 10, 30)\n\n #Orders of magnitude from 10^19 to get Densities\n rho_chi_adjust = [10**6, 10**3]\n\n\n #~~~~~~~~~~ SIGMA DD BOUNDS DATA ~~~~~~~~~~~~~~~~\n\n #Reading PICO-60 SD Data\n mchi_P60_dat = []\n sigma_P60_dat = []\n with open('PICO60_SD.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n mchi_P60_dat.append(float(row[0]))\n sigma_P60_dat.append(float(row[1]))\n\n\n #Reading PICO-60 NF Data\n mchi_P60NF_dat = []\n sigma_P60NF_dat = []\n with open('NF_PICO60.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n mchi_P60NF_dat.append(float(row[0]))\n sigma_P60NF_dat.append(float(row[1]))\n\n\n #~~~~~~~~~~~~~~~~ CALCULATING POP III BOUNDS ON SIGMA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n M = [300]\n\n #Looping over all Stellar Masses\n for i in range(0, len(M)):\n\n #Color formatting of plot\n colors = palette(i/len(M))\n area_color = list(colors)\n area_color[3] = 0.2\n\n #Re-initalizing Sigma for each star\n sigma = []\n\n #Looping over all densities\n for j in range(0, len(rho_chi_adjust)):\n\n #Each density has a list of sigma within a list\n sigma.append([])\n\n #Looping over all DM masses\n for k in range(0, len(mchi)):\n\n print('working on mx = ' + 
str(mchi[k]))\n\n sigma[j].append(sigma_Nx(stars_list[i], mchi[k], 10**19, vbar, 10**6) * rho_chi_adjust[j])\n\n\n #Undertainty region of Rho_chi, featuring two lines and a shaded region\n plt.plot(mchi, sigma[0], color = 'dimgray', label = str(M[i]) + '$M_\\odot$, $\\\\rho_\\chi = 10^{13}$')\n plt.plot(mchi, sigma[1], ls = '--', color = 'dimgray', label = str(M[i]) + '$M_\\odot$, $\\\\rho_\\chi = 10^{16}$')\n plt.fill_between(mchi, sigma[0], 10**-25, color = area_color)\n plt.fill_between(mchi, sigma[1], 10**-25, color = area_color)\n\n\n #~~~~~~~~~~~~~ PLOTTING DD BOUNDS ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #PICO bounds, Current and NF from 10^3 - 10^16\n sigma_pico = 3.42e-40*(mchi_pico/10**3)\n sigma_picoNF = 8.67e-47*(mchi_nf)\n\n #PICO60 Data up to ~ 10^3\n #plt.plot(mchi_P60_dat, sigma_P60_dat, color ='honeydew', ls = '-', linewidth = 1)\n plt.fill_between(mchi_P60_dat, sigma_P60_dat, 10**-25, color = 'dimgray')\n\n #PICO60 FIT\n #plt.plot(mchi_pico, sigma_pico, color ='honeydew', ls = '-', linewidth = 1, label = 'PICO-60 SD Bounds')\n plt.fill_between(mchi_pico, sigma_pico, 10**-25, color = 'dimgray')\n plt.text(10**4, 10**-34, \"PICO-60\", color = 'white', fontsize = '12')\n\n #PICO_NF Data up to ~ 10^3\n plt.plot(mchi_P60NF_dat, sigma_P60NF_dat, color = 'k', ls = '-', linewidth = 1.5)\n\n #PICO_NF FIT\n plt.plot(mchi_nf, sigma_picoNF, color = 'k', ls = '-', linewidth = 1.5, label = 'PICO SD Neutrino Floor')\n\n\n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n plt.xlim(mchi[0], mchi[-1])\n plt.ylim(plt.ylim()[0], 10**-30)\n plt.ylabel('$\\sigma$ [$cm^2$]', fontsize =15)\n plt.title('BH Exclusion Bounds on $\\sigma$ for Non-Annihilating DM, Varying $M_\\star$ and $\\\\rho_\\chi$')\n plt.legend(loc = 'best', ncol = 2)\n plt.savefig('sigma_mchi_BH_Exclusion.png', dpi = 200)\n plt.show()\n\n\n\n## #~~~~~~~~~~~~~~~~~ FIG 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n##\n## #Figure Formatting\n## fig = plt.figure(figsize = (12, 10))\n## plt.style.use('fast')\n## palette = plt.get_cmap('viridis')\n##\n##\n## #Looping over all Stellar Masses\n## for i in range(0, len(M)):\n##\n## #Color formatting of plot\n## colors = palette(i/len(M))\n## area_color = list(colors)\n## area_color[3] = 0.2\n##\n## #Re-initalizing Sigma for each star\n## sigma = []\n##\n## #Looping over all densities\n## for j in range(0, len(rho_chi_adjust)):\n##\n## #Each density has a list of sigma within a list\n## sigma.append([])\n##\n## #Looping over all DM masses\n## for k in range(0, len(mchi)):\n##\n## print('working on mx = ' + str(mchi[k]))\n##\n## sigma[j].append(sigma_Nx_Ca(stars_list[i], mchi[k], 10**19, vbar, 10**6) * rho_chi_adjust[j])\n##\n##\n## #Undertainty region of Rho_chi, featuring two lines and a shaded region\n## plt.plot(mchi, sigma[0], color = 'dimgray', label = str(M[i]) + '$M_\\odot$, $\\\\rho_\\chi = 10^{13}$')\n## plt.plot(mchi, sigma[1], ls = '--', color = 'dimgray', label = str(M[i]) + '$M_\\odot$, $\\\\rho_\\chi = 10^{16}$')\n## plt.fill_between(mchi, sigma[0], 10**-25, color = area_color)\n## plt.fill_between(mchi, sigma[1], 10**-25, color = area_color)\n##\n##\n## #~~~~~~~~~~~~~ PLOTTING DD BOUNDS ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n##\n## #PICO bounds, Current and NF from 10^3 - 10^16\n## sigma_pico = 3.42e-40*(mchi_pico/10**3)\n## sigma_picoNF = 8.67e-47*(mchi_nf)\n##\n## #PICO60 Data up to ~ 10^3\n## #plt.plot(mchi_P60_dat, sigma_P60_dat, color ='honeydew', ls = '-', linewidth = 1)\n## 
plt.fill_between(mchi_P60_dat, sigma_P60_dat, 10**-25, color = 'dimgray')\n##\n## #PICO60 FIT\n## #plt.plot(mchi_pico, sigma_pico, color ='honeydew', ls = '-', linewidth = 1, label = 'PICO-60 SD Bounds')\n## plt.fill_between(mchi_pico, sigma_pico, 10**-25, color = 'dimgray')\n## plt.text(10**4, 10**-34, \"PICO-60\", color = 'white', fontsize = '12')\n##\n## #PICO_NF Data up to ~ 10^3\n## plt.plot(mchi_P60NF_dat, sigma_P60NF_dat, color = 'k', ls = '-', linewidth = 1.5)\n##\n## #PICO_NF FIT\n## plt.plot(mchi_nf, sigma_picoNF, color = 'k', ls = '-', linewidth = 1.5, label = 'PICO SD Neutrino Floor')\n##\n##\n## plt.yscale('log')\n## plt.xscale('log')\n## plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n## plt.xlim(mchi[0], mchi[-1])\n## #plt.ylim(plt.ylim()[0], 10**-30)\n## plt.ylabel('$\\sigma$ [$cm^2$]', fontsize =15)\n## plt.title('BH Exclusion Bounds on $\\sigma$ for Annihilating DM, Varying $M_\\star$ and $\\\\rho_\\chi$')\n## plt.legend(loc = 'best', ncol = 2)\n## plt.savefig('sigma_mchi_BH_Exclusion_Annihilating.png', dpi = 200)\n## plt.show()\n\n\n\nelif plottype == 'Nx comp':\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n\n\n E = 0\n\n timespan = np.logspace(0, 10, 60)\n rho_chis = [10**3, 10**4]\n sigma = 10**-42\n mchi = 10**4\n\n Nx = []\n Nx_a = []\n Nx_limit = np.full(60, (5*10**48)*(mchi/10**3)**-3)\n\n\n #Looping over all times in the timespan\n for k in range(0, len(timespan)):\n\n Nx.append(Nx_t_diff(mchi, 10**4, vbar, sigma, Sun, timespan[k]))\n Nx_a.append(Nx_analytic(mchi, 10**4, vbar, sigma, Sun, timespan[k]))\n\n\n\n #Plotting\n plt.plot(timespan, Nx, color = 'b', label = 'Differential Nx')\n plt.plot(timespan, Nx_a, color = 'r', label = 'Analytic Nx')\n\n\n plt.plot(timespan, Nx_limit, color = 'k', ls = '--', label = '$N_\\chi$ Limit')\n\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #print(mchi)\n #print(sigma)\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('time [yrs]', fontsize = 15)\n plt.xlim(timespan[0], timespan[-1])\n #plt.ylim(10**-50, 10**-30)\n plt.ylabel('$N_\\chi$', fontsize = 15)\n plt.title('Differential vs. 
Analytic $N_\\chi$ Values')\n plt.legend(loc = 'best', ncol = 2) \n plt.savefig('nx_comparison.png', dpi = 200)\n plt.show()\n\n\nelif plottype == \"ellis params\":\n\n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n\n E = 0\n\n mchi = np.logspace(-3, 18, 30)\n M_acc = []\n M_ch = []\n M_K = []\n\n\n #Looping over all Stellar Masses\n for i in range(0, len(mchi)):\n\n temp = self_gravitation_cond(M100, mchi[i])\n temp2 = chandrasekhar_mass(mchi[i])\n temp3 = kaup_mass(mchi[i])\n\n M_sol = temp/1.9885e33\n M_sol2 = temp2/1.9885e33\n M_sol3 = temp3/1.9885e33\n\n M_acc.append(M_sol)\n M_ch.append(M_sol2)\n M_K.append(M_sol3)\n\n\n print(M_acc)\n \n #Plotting\n plt.plot(mchi, M_acc, color = \"red\", linewidth = 3, label = \"$M_{sg}$\")\n plt.plot(mchi, M_ch, color = \"black\", linewidth = 3, label = \"$M_{Ch}$\")\n plt.plot(mchi, M_K, color = \"lightblue\", linewidth = 3, label = \"$M_{Kaup}$\")\n\n\n slope, intercept = np.polyfit(np.log(mchi), np.log(M_acc), 1)\n print(\"slope: \" + str(slope))\n print(\"intercept: \" + str(intercept))\n\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #print(mchi)\n #print(sigma)\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n plt.xlim(mchi[0], mchi[-1])\n #plt.ylim(plt.ylim()[0], 10**-20)\n plt.ylabel('$M_{acc}$ [$M_\\odot$]', fontsize = 15)\n plt.title('Self-Gravitating Mass Limit for $M_\\star$ = 100 $M_\\odot$', fontsize = 18)\n plt.legend(loc = 'lower left', ncol = 1, fontsize = 15)\n plt.savefig('self_gravitating_exclusion.png', dpi = 200)\n plt.show()\n \n \n \n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n\n\n E = 0\n\n mchi = np.logspace(1, 5, 28)\n M_acc = []\n M_ch = []\n M_K = []\n\n\n #Looping over all Stellar Masses\n for i in range(0, len(mchi)):\n\n temp = self_gravitation_cond(M100, mchi[i])\n temp2 = chandrasekhar_mass(mchi[i])\n temp3 = kaup_mass(mchi[i])\n\n M_sol = temp/1.9885e33\n M_sol2 = temp2/1.9885e33\n M_sol3 = temp3/1.9885e33\n\n M_acc.append(M_sol)\n M_ch.append(M_sol2)\n M_K.append(M_sol3)\n\n\n print(M_acc)\n \n \n mchi_dat = np.logspace(1, 5, 28)\n\n E = 0\n sigma = 10**-40\n M_DM = []\n \n for i in range(0, len(mchi_dat)):\n \n temp = M_core_asymmetric(mchi_dat[i], M100, 10**13, vbar, sigma)\n M_DM.append(temp/1.9885e33)\n \n plt.plot(mchi_dat, M_DM, color = \"pink\")\n \n \n\n # sigma = 10**-40\n # N_chi = [ [],[] ]\n # M_DM = [ [],[] ]\n\n\n\n # for j in range(0, len(rho_chi_list)):\n\n # #Looping over all DM masses\n # for k in range(0, len(mchi_dat)):\n\n # if mchi_dat[k] <= 10**6:\n # N_chi[j].append(N_chi_func_22(mchi_dat[k], sigma, M100, rho_chi_list[j], vbar, E))\n # #N_chi[i][j].append(Nx_t_diff(mchi_dat[k], rho_chi_list[j], vbar, sigma, star, 10**6))\n # M_DM[j].append(N_chi[j][k]*mchi_dat[k])\n\n # temp = M_DM[j][k]\n # M_DM[j][k] = temp * 1.78*10**-24 * 5.028*10**-34\n\n # else:\n # sigma = sigma_mchi_pureH(M100, mchi_dat[k], 10**19, vbar) * rho_chi_adjust[j]\n\n # N_chi[j].append(N_chi_func_22(mchi_dat[k], sigma, M100, rho_chi_list[j], vbar, E))\n # M_DM[j].append(N_chi[j][k]*mchi_dat[k])\n\n # temp = M_DM[j][k]\n # M_DM[j][k] = temp * 1.78*10**-24 * 5.028*10**-34\n\n\n # #Plotting\n # plt.plot(mchi_dat, M_DM[j], color = \"pink\")\n\n\n # plt.fill_between(mchi_dat, M_DM[0], M_DM[1], color = \"pink\", label = '$M_{DM}$, $\\\\rho_\\chi = 10^{13} - 10^{16}$')\n\n # slope, intercept = 
np.polyfit(np.log(mchi_dat), np.log(M_DM[1]), 1)\n # print(\"slope: \" + str(slope))\n \n #Plotting\n plt.plot(mchi, M_acc, color = \"red\", linewidth = 3, label = \"$M_{sg}$\")\n plt.plot(mchi, M_ch, color = \"black\", linewidth = 3, label = \"$M_{Ch}$\")\n plt.plot(mchi, M_K, color = \"lightblue\", linewidth = 3, label = \"$M_{Kaup}$\")\n\n\n slope, intercept = np.polyfit(np.log(mchi), np.log(M_acc), 1)\n print(\"slope: \" + str(slope))\n print(\"intercept: \" + str(intercept))\n\n\n #~~~~~~~~~~~~~~~~~~ FINAL PLOT FORMATTING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n #print(mchi)\n #print(sigma)\n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n plt.xlim(mchi[0], mchi[-1])\n #plt.ylim(plt.ylim()[0], 10**-20)\n plt.ylabel('$M_{acc}$ [$M_\\odot$]', fontsize = 15)\n plt.title('Self-Gravitating Mass Limit for $M_\\star$ = 100 $M_\\odot$', fontsize = 18)\n plt.legend(loc = 'lower left', ncol = 1, fontsize = 15)\n #plt.savefig('self_gravitating_exclusion.png', dpi = 200)\n plt.show()\n\n\nelif plottype == \"ellis sigma\":\n \n #Figure Formatting\n fig = plt.figure(figsize = (12, 10))\n plt.style.use('fast')\n palette = plt.get_cmap('viridis')\n \n E = 0\n \n mchi = np.logspace(1, 5, 28)\n sigma = []\n \n for i in range(0, len(mchi)):\n temp = sigma_BH(M100, mchi[i], 10**13, vbar)\n sigma.append(temp)\n \n plt.plot(mchi, sigma)\n \n slope, intercept = np.polyfit(np.log(mchi), np.log(sigma), 1)\n print(\"slope: \" + str(slope))\n print(\"intercept: \" + str(intercept))\n \n \n plt.yscale('log')\n plt.xscale('log')\n plt.xlabel('$m_\\chi$ [GeV]', fontsize = 15)\n plt.xlim(mchi[0], mchi[-1])\n #plt.ylim(plt.ylim()[0], 10**-20)\n plt.ylabel('$\\sigma$ [$cm^2$]', fontsize = 15)\n plt.title('Self-Gravitating Mass Exclusion for $M_\\star$ = 100 $M_\\odot$', fontsize = 18)\n #plt.legend(loc = 'lower left', ncol = 1, fontsize = 15)\n #plt.savefig('self_gravitating_exclusion.png', dpi = 200)\n plt.show()\n\n\nelse:\n\n print(\"Enter a valid plot type.\")\n","repo_name":"mnmarkham/thinking-space","sub_path":"PY_BH_Exclusion.py","file_name":"PY_BH_Exclusion.py","file_ext":"py","file_size_in_byte":67349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24850192505","text":"from . 
import views\nfrom django.urls import path\n\nurlpatterns = [\n path('', views.adminpanel, name='adminpanel'),\n path('admin-login/', views.adminlogin, name='adminlogin'),\n path('user-list', views.userlist, name='userlist'),\n path('psychologistlist', views.psychologistlist, name='psychologistlist'),\n path('guestpsychologist', views.guestpsychologist, name='guestpsychologist'),\n path('adminsignin/', views.adminsignin, name='adminsignin'),\n path('adminsignout/', views.adminsignout, name='adminsignout'),\n path('verifypsychologist//', views.verifypsychologist, name='verifypsychologist')\n \n]","repo_name":"Vishnuvp1/Mindcare","sub_path":"adminpanel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22183046832","text":"\n# coding: utf-8\n\n# # Data Cleaning + Wrangling\n\n# ## Importing Libraries, Merging data sets, Handling duplicates and weird labels\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().magic('matplotlib inline')\n\n\n# In[2]:\n\n\ndata = pd.read_excel('hr_data_final.xlsx')\n\n\n# In[3]:\n\n\ndata2 = pd.read_excel('curr_employee.xls')\n\n\n# In[4]:\n\n\ndata3 = pd.read_excel('turnover.xls')\n\n\n# In[5]:\n\n\ndata4 = pd.read_excel('degree_job_class_filled.xls')\n\n\n# In[6]:\n\n\n# Dropping Duplicate Columns from data\ndata = data.loc[:,~data.columns.duplicated()]\n\n\n# In[7]:\n\n\n# Dropping duplicates and keeping the first ones\ndata = data.drop_duplicates(subset=['Tax ID','Ethnicity','Gender','Annual Salary','Age','Position Effective Date'])\n\n\n# In[8]:\n\n\n# Dropping duplicates and keeping the first ones\ndata2 = data2.drop_duplicates(subset=['Ethnicity','Gender','Annual Salary','Age','Position Effective Date'])\n\n\n# In[9]:\n\n\n# Dropping duplicates and keeping the first ones\ndata3 = data3.drop_duplicates(subset=['Gender','Annual Salary','Age','Position Effective Date'])\n\n\n# In[10]:\n\n\nfin_data = pd.merge(data3, data, on=['Ethnicity','Gender','Annual Salary','Age','Position Effective Date'],how='outer')\n\n\n# In[11]:\n\n\n# Dropping duplicates and keeping the first ones\nfin_data = fin_data.drop_duplicates(subset=['Gender','Ethnicity','Annual Salary','Age','Position Effective Date'])\n\n\n# In[12]:\n\n\nfor col in fin_data.columns[fin_data.columns.str.endswith('_x')].tolist():\n fin_data.loc[fin_data[col].isnull(),col] = fin_data[col[:-2]+\"_y\"]\n\n\n# In[13]:\n\n\nfor col in fin_data.columns[fin_data.columns.str.endswith('_y')].tolist():\n del fin_data[col]\n\n\n# In[14]:\n\n\nfor col in fin_data.columns[fin_data.columns.str.endswith('_x')].tolist():\n fin_data = fin_data.rename(columns={col:col[:-2]})\n\n\n# In[15]:\n\n\n# Deleting unnecessary columns in data\nfor col in ['Education Level Description.1','Annual Salary.1','Pay Frequency.1','Regular Pay Rate Amount.1','Education Level Description']:\n del fin_data[col]\n\n\n# In[16]:\n\n\n# Dropping duplicates and keeping the first ones\nfin_data = fin_data.drop_duplicates(subset=['Ethnicity','Gender','Annual Salary','Age','Position Effective Date'])\n\n\n# In[17]:\n\n\nfin_data = pd.merge(fin_data, data2,on=['Age','Annual Salary','Ethnicity','Gender','Position Effective Date'], how='left')\n\n\n# In[18]:\n\n\nfor col in fin_data.columns[fin_data.columns.str.endswith('_x')].tolist():\n fin_data.loc[fin_data[col].isnull(),col] = fin_data[col[:-2]+\"_y\"]\n\n\n# In[19]:\n\n\nfor col in 
fin_data.columns[fin_data.columns.str.endswith('_y')].tolist():\n del fin_data[col]\n\n\n# In[20]:\n\n\nfor col in fin_data.columns[fin_data.columns.str.endswith('_x')].tolist():\n fin_data = fin_data.rename(columns={col:col[:-2]})\n\n\n# In[21]:\n\n\ndel fin_data['Unnamed: 6']\n\n\n# In[22]:\n\n\ndel fin_data['Business Unit Description']\n\n\n# In[23]:\n\n\n# Dropping duplicates and keeping the first ones\nfin_data = fin_data.drop_duplicates(subset=['Gender','Ethnicity','Annual Salary','Age','Position Effective Date'])\n\n\n# In[24]:\n\n\ndata4['Education Level Code'] = data4['Education Level Description']\n\n\n# In[25]:\n\n\ndel data4['Education Level Description']\n\n\n# In[26]:\n\n\nfin_data = pd.merge(fin_data,data4[['Tax ID','Ethnicity','Gender','Annual Salary','Age','Position Effective Date',\n 'Education Level Code','EEOC Job Classification']], \n on=['Tax ID','Ethnicity','Gender','Annual Salary','Age','Position Effective Date'], how='left')\n\n\n# In[27]:\n\n\nfor col in fin_data.columns[fin_data.columns.str.endswith('_x')].tolist():\n fin_data.loc[fin_data[col].isnull(),col] = fin_data[col[:-2]+\"_y\"]\n\n\n# In[28]:\n\n\nfor col in fin_data.columns[fin_data.columns.str.endswith('_y')].tolist():\n del fin_data[col]\n\n\n# In[29]:\n\n\nfor col in fin_data.columns[fin_data.columns.str.endswith('_x')].tolist():\n fin_data = fin_data.rename(columns={col:col[:-2]})\n\n\n# In[30]:\n\n\nfin_data.info()\n\n\n# In[31]:\n\n\n# Dropping duplicates and keeping the first ones\nfin_data = fin_data.drop_duplicates(subset=['Gender','Ethnicity','Annual Salary','Age','Position Effective Date','Years of Service'])\n\n\n# In[32]:\n\n\nfin_data.info()\n\n\n# ## Missing Data\n\n# In[33]:\n\n\nfin_data['Age'] = fin_data['Age'].fillna(np.median(fin_data['Age']))\nfin_data['Ethnicity'] = fin_data['Ethnicity'].fillna('Hispanic or Latino')\nfin_data['Race'] = fin_data['Race'].fillna('Hispanic or Latino')\n\n\n# In[34]:\n\n\n# If two employees have same job classification, assumed they work same number of hours\nfin_data['Scheduled Hours'] = fin_data.groupby('EEOC Job Classification')['Scheduled Hours'].bfill().ffill()\n\n\n# In[35]:\n\n\nfin_data['Scheduled Hours'].value_counts().index[0]\n\n\n# In[36]:\n\n\n# new race code 10.0 denoting \"Hispanic or Latino\"\nfin_data['Race Code'] = fin_data['Race Code'].fillna(10.0)\n\n\n# In[37]:\n\n\n# If two employees have same home department, Job Title Description and number of work hours, assumed their regular pay amount is equal\nfin_data['Regular Pay Rate Amount'] =fin_data.groupby(['EEOC Job Classification','Scheduled Hours','Home Department Description'])['Regular Pay Rate Amount'].bfill().ffill()\n\n\n# In[38]:\n\n\n# Fill in the still missing values with the most frequent value of that variable\nfin_data['Scheduled Hours'] = fin_data['Scheduled Hours'].fillna(fin_data['Scheduled Hours'].value_counts().index[0])\nfin_data['Regular Pay Rate Amount'] = fin_data['Regular Pay Rate Amount'].fillna(fin_data['Regular Pay Rate Amount'].value_counts().index[0])\n\n\n# In[39]:\n\n\ndel fin_data['Position Start Date']\n\n\n# In[40]:\n\n\ndel fin_data['Promotion Check']\n\n\n# In[41]:\n\n\ndel fin_data['Home Department Code']\n\n\n# In[42]:\n\n\n# If the position effective date doesn't match the hire date, the employee has been promoted\nfin_data['check'] = np.where(fin_data['Position Effective Date']!=fin_data['Hire Date'], 'Yes', 'No')\n\n\n# In[43]:\n\n\n# If the position effective date doesn't match the hire date, the employee has been promoted\n
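# (only rows where Promotion is still missing receive the heuristic flag; recorded values are kept)\n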
fin_data.loc[fin_data['Promotion'].isnull(),'Promotion'] =fin_data['check']\n\n\n# In[44]:\n\n\ndel fin_data['check']\n\n\n# In[45]:\n\n\ndel fin_data['Race Description']\n\n\n# In[46]:\n\n\n# If two employees have same home department, Job Classification and number of work hours, assumed their Supervisor is the same\nfin_data['Supervisor ID'] =fin_data.groupby(['EEOC Job Classification','Home Department Description','Scheduled Hours'])['Supervisor ID'].bfill().ffill()\n\n\n# In[47]:\n\n\nfin_data['Supervisor Name'] = fin_data['Reports To First Name'] + \" \" + fin_data['Reports To Last Name']\n\n\n# In[48]:\n\n\n# If two employees have the same supervisor ID, then the supervisor's name would be the same\nfin_data['Supervisor Name'] =fin_data.groupby(['Supervisor ID'])['Supervisor Name'].bfill().ffill()\n\n\n# In[49]:\n\n\n# If two employees have same annual salary, scheduled number of hours and regular pay amount, \n# assumed their job classification would be the same\nfin_data['EEOC Job Classification'] =fin_data.groupby(['Annual Salary','Scheduled Hours','Regular Pay Rate Amount'])['EEOC Job Classification'].bfill().ffill()\n\n\n# In[50]:\n\n\ndel fin_data['Current Date']\n\n\n# In[51]:\n\n\ndel fin_data['Job Change Reason Code']\n\n\n# In[52]:\n\n\ndel fin_data['Reports To First Name']\ndel fin_data['Reports To Last Name']\n\n\n# In[53]:\n\n\ndel fin_data['Benefits Eligibility Class Code']\n\n\n# In[54]:\n\n\n# If two employees have the same Job Classification, number of work hours, regular pay amount, and annual salary,\n# assumed their job titles would be the same\nfin_data['Job Title Description'] =fin_data.groupby(['EEOC Job Classification','Scheduled Hours','Regular Pay Rate Amount','Annual Salary'])['Job Title Description'].bfill().ffill()\n\n\n# In[55]:\n\n\ndel fin_data['Pay Frequency']\n\n\n# In[56]:\n\n\n# If two employees have the same Job Classification, job description, regular pay amount, and annual salary, \n# assumed FLSA Code would be the same\nfin_data['FLSA Code'] =fin_data.groupby(['EEOC Job Classification','Job Title Description','Regular Pay Rate Amount','Annual Salary'])['FLSA Code'].bfill().ffill()\n\n\n# In[57]:\n\n\nfin_data['FLSA Code'] = fin_data['FLSA Code'].replace({' ':np.nan})\n\n\n# In[58]:\n\n\n# If two employees have the same Job Classification, job description, regular pay amount, and annual salary, \n# assumed FLSA Code would be the same\nfin_data['FLSA Code'] =fin_data.groupby(['EEOC Job Classification','Job Title Description','Regular Pay Rate Amount','Annual Salary'])['FLSA Code'].bfill().ffill()\n\n\n# In[59]:\n\n\n# If two employees have the same Annual Salary, job description, and regular pay amount, assumed Education Level would be similar (the same)\nfin_data['Education Level Code'] =fin_data.groupby(['Job Title Description','Regular Pay Rate Amount','Annual Salary'])['Education Level Code'].bfill().ffill()\n\n\n# In[60]:\n\n\n# If two employees have the same supervisor, job classification and job description, assumed Home Department would be the same\nfin_data['Home Department Description'] =fin_data.groupby(['Supervisor ID','EEOC Job Classification','Job Title Description'])['Home Department Description'].bfill().ffill()\n\n\n# In[61]:\n\n\n# Column that indicates whether this employee is still in Family Services or has been terminated (left)\nfin_data['Left'] = np.where(fin_data['Termination Date'].notnull(), 1, 0)\n\n\n# In[62]:\n\n\n# Fill null with today's timestamp\nfrom pandas import Timestamp\nfin_data['Termination Date'] = 
fin_data['Termination Date'].fillna(Timestamp.today())\n\n\n# In[63]:\n\n\n# Years of service in timedelta\nfin_data['timedelta'] = fin_data['Termination Date'] - fin_data['Hire Date']\n\n\n# In[64]:\n\n\n# Converting Years of service in timedelta into number of years\nfin_data['timedelta'] = (fin_data['timedelta'].dt.days)/365\n\n\n# In[65]:\n\n\nfin_data.loc[fin_data['Years of Service'].isnull(),'Years of Service']=fin_data['timedelta']\n\n\n# In[66]:\n\n\ndel fin_data['timedelta']\n\n\n# In[67]:\n\n\ndel fin_data['Termination Date']\n\n\n# In[68]:\n\n\n# If two employees have the same job classification, job description, and annual salary assumed Benefits Eligibility would be the same\nfin_data['Benefits Eligibility Class Description'] =fin_data.groupby(['EEOC Job Classification','Job Title Description','Annual Salary'])['Benefits Eligibility Class Description'].bfill().ffill()\n\n\n# In[69]:\n\n\ndel fin_data['Years in Current Position']\n\n\n# In[70]:\n\n\n# Filling in missing information of categorical variables with \"Not Reported\"\nfor col in ['Supervisor ID','Supervisor Name','FLSA Code','Home Department Description','Job Title Description',\n 'Payroll Company Code','Education Level Code','Education Level Code',\n 'Benefits Eligibility Class Description']:\n fin_data[col] = fin_data[col].fillna('Not Reported')\n\n\n# In[71]:\n\n\nfin_data['EEOC Job Classification'] = fin_data['EEOC Job Classification'].replace({'Not reported': 'Not Reported'})\n\n\n# In[72]:\n\n\nfin_data['FLSA Code'] = fin_data['FLSA Code'].replace({' ': 'Not Reported'})\n\n\n# In[73]:\n\n\nfin_data = fin_data.reset_index()\n\n\n# In[74]:\n\n\n# Dropping duplicates and keeping the first ones\ndata = data.drop_duplicates(subset=['Age','Race','Ethnicity','Annual Salary','Scheduled Hours','Regular Pay Rate Amount'])\n\n\n# In[75]:\n\n\ndel fin_data['index']\n\n\n# In[76]:\n\n\n# Changing datatype of Hire Date into \"datetime\" for convenience\nfin_data['Hire Date'] = pd.to_datetime(fin_data['Hire Date'])\n\n\n# In[77]:\n\n\nfin_data['Hire Year'] = fin_data['Hire Date'].dt.year\n\n\n# In[82]:\n\n\nfin_data['Hire Year'] = fin_data['Hire Year'].astype('int64',errors='ignore')\n\n\n# In[79]:\n\n\nfin_data['Hire Month'] = fin_data['Hire Date'].dt.month\n\n\n# In[85]:\n\n\nfin_data['Hire Month'] = fin_data['Hire Month'].astype('int64',errors='ignore')\n\n\n# In[86]:\n\n\nfin_data.info()\n\n\n# ## Saving into CSV Files\n\n# In[87]:\n\n\n# Save cleaned data as a new file\n\nfin_data.to_csv('clean_data.csv')\n\n\n# In[88]:\n\n\n# Current Employees\ncurr_emp = fin_data[fin_data['Left']==0]\n\n\n# In[89]:\n\n\ncurr_emp.to_csv('curr_emp.csv')\n\n\n# In[90]:\n\n\n# Terminated Employees\nleft_emp = fin_data[fin_data['Left']==1]\n\n\n# In[91]:\n\n\nleft_emp.to_csv('left_emp.csv')\n\n\n# In[92]:\n\n\nprint(fin_data.shape, curr_emp.shape, left_emp.shape)\n\n\n# 652 current employees and 2652 terminated employees\n","repo_name":"Seungjun-Data-Science/Family-Services-HR-Analytics-Project","sub_path":"Data Cleaning + Wrangling.py","file_name":"Data Cleaning + Wrangling.py","file_ext":"py","file_size_in_byte":12172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14416048430","text":"from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler\nimport json\n\nhostName = \"localhost\"\nserverPort = 8081\n\nclass MyServer(SimpleHTTPRequestHandler):\n def do_POST(self):\n self.data_string = self.rfile.read(int(self.headers['Content-Length'])).decode()\n\n 
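# NOTE: the request body is stored in self.data_string but is not used yet; the handler returns a fixed payload.\n # 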
self.protocol_version = 'HTTP/1.1'\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n text = json.dumps([\"aa\", \"aaa\"]).encode()\n self.send_header(\"Content-Length\", len(text))\n self.end_headers()\n self.wfile.write(text)\n return\n\nif __name__ == \"__main__\": \n webServer = ThreadingHTTPServer((hostName, serverPort), MyServer)\n print(\"Server started http://%s:%s\" % (hostName, serverPort))\n\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n\n webServer.server_close()\n print(\"Server stopped.\")\n\n","repo_name":"cuban-digital-language/vectorial-information-retrieval-model","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43206284613","text":"from typing import Optional\n\nfrom telegram.ext import CommandHandler, CallbackQueryHandler\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\nfrom django_telegrambot.apps import DjangoTelegramBot\nfrom . models import Motion, BotUsers, BotChat, Language, TelegramToken, User, Round, Room\n\nimport re\nimport logging\nimport datetime\n\nclass TabmakerBot:\n\n __CALLBACK_NEXT_MOTION_ACTION = 'next_motion'\n __CALLBACK_CHANGE_LANGUAGE_ACTION = 'LANG_'\n\n def __init__(self, logger):\n self.logger = logger\n\n\n def motion_handler(self, bot, update):\n user, chat = self.__get_or_create_user(update.message.from_user, update.message.chat)\n self.__send_motion(bot, update.message.chat_id, user, chat)\n\n\n def start_handler(self, bot, update):\n user, chat = self.__get_or_create_user(update.message.from_user, update.message.chat)\n\n token = update.message.text.split('/start')[1].strip()\n if token:\n connected_user = self.__connect_user(user, chat, token)\n bot.sendMessage(\n update.message.chat_id,\n text=('Канал привязан к пользователю %s' % connected_user.name()),\n )\n\n # TODO print help\n\n\n def language_handler(self, bot, update):\n self.__get_or_create_user(update.message.from_user, update.message.chat)\n\n bot.sendMessage(\n update.message.chat_id,\n text='Choose language:',\n reply_markup=InlineKeyboardMarkup(\n inline_keyboard=[\n [self.__build_language_buttons(language) for language in Language.objects.filter(is_public=True)]\n ],\n )\n )\n\n\n def callback_query_handler(self, bot, update):\n user, chat = self.__get_or_create_user(update.callback_query.from_user, update.callback_query.message.chat)\n lang_parser = re.match(\n self.__CALLBACK_CHANGE_LANGUAGE_ACTION + '(?P<lang_id>[0-9]+)',\n update.callback_query.data\n )\n\n if lang_parser:\n try:\n language = Language.objects.get(id=int(lang_parser.group('lang_id')))\n if chat:\n chat.language = language\n chat.save()\n else:\n user.language = language\n user.save()\n\n bot.sendMessage(\n update.callback_query.message.chat_id,\n text=(language.telegram_bot_label if language.telegram_bot_label else language.name),\n # reply_markup=get_keyboard()\n )\n except Exception as e:\n self.logger.error(e)\n pass\n # elif update.callback_query.data == self.__CALLBACK_NEXT_MOTION_ACTION:\n\n return self.__send_motion(bot, update.callback_query.message.chat_id, user, chat)\n\n\n def error_handler(self, bot, update, error):\n self.logger.warning('Update \"%s\" caused error \"%s\"' % (update, error))\n\n\n @staticmethod\n def send_round_notifications(bot, cur_round: Round, rooms: [Room]):\n motion = cur_round.motion.infoslide + '\\n\\n' + cur_round.motion.motion if cur_round.motion.infoslide 
\\\n else cur_round.motion.motion\n\n for room in rooms:\n room_message = \\\n '1П: ' + room.game.og.name + '\\n' + \\\n '1О: ' + room.game.oo.name + '\\n' + \\\n '2П: ' + room.game.cg.name + '\\n' + \\\n '2О: ' + room.game.co.name + '\\n\\n' + \\\n 'Судья: ' + room.game.chair.name() + '\\n\\n' + \\\n 'Аудитория: ' + room.place.place + '\\n\\n' + \\\n motion\n\n speakers = [\n room.game.chair\n ]\n\n for team in room.game.get_teams():\n for speaker in team.get_speakers():\n speakers.append(speaker)\n\n for speaker in speakers:\n if not speaker.telegram_id:\n continue\n\n bot.sendMessage(speaker.telegram_id, text=room_message)\n logging.info(speaker.telegram_id)\n logging.info(room_message)\n\n\n def __get_or_create_user(self, from_user, from_chat):\n user = None\n chat = None\n\n try:\n user, created = BotUsers.objects.update_or_create(id=from_user.id)\n\n user.username = from_user.username or ''\n user.first_name = from_user.first_name or ''\n user.last_name = from_user.last_name or ''\n\n user.save()\n\n if from_chat.id != from_user.id:\n chat, created = BotChat.objects.update_or_create(id=from_chat.id)\n chat.title = from_chat.title or ''\n chat.save()\n\n except Exception as e:\n self.logger.error(e)\n\n return user, chat\n\n\n def __send_motion(self, bot, chat_id, user, chat):\n motions = Motion.objects.filter(is_public=True)\n\n language = chat.language if chat else user.language\n if language:\n motions = motions.filter(language=language)\n\n motion = motions.order_by('?').first()\n\n self.logger.info('{} {} // {} | {} | {}'.format(user, chat, motion.id, motion.motion, motion.infoslide))\n\n message = motion.motion\n if motion.infoslide:\n message += '\\n' + '\\n' + motion.infoslide\n\n bot.sendMessage(chat_id, text=message, reply_markup=self.__build_motion_keyboard())\n\n\n def __build_language_buttons(self, language: Language) -> InlineKeyboardButton:\n return InlineKeyboardButton(\n language.telegram_bot_label if language.telegram_bot_label else language.name,\n callback_data=self.__CALLBACK_CHANGE_LANGUAGE_ACTION + str(language.id)\n )\n\n\n def __build_motion_keyboard(self) -> InlineKeyboardMarkup:\n return InlineKeyboardMarkup([\n [InlineKeyboardButton('Next motion', callback_data=self.__CALLBACK_NEXT_MOTION_ACTION)],\n ])\n\n\n def __connect_user(self, bot_user: BotUsers, chat: BotChat, token: str) -> Optional[User]:\n if chat is not None:\n self.logger.warning('Cannot link an account from a group chat')\n return None\n\n token: TelegramToken = TelegramToken.objects.filter(value=token).first()\n\n if token is None:\n self.logger.warning('Telegram token not found')\n return None\n\n if token.expire < datetime.datetime.now():\n raise Exception('Время жизни токена закончилось, попробуйте заново')\n\n token.user.telegram = bot_user\n token.user.save()\n\n return token.user\n\n\ndef main():\n logger = logging.getLogger('TelegramBot')\n logger.debug('Loading handlers for telegram bot')\n\n telegram_bot = TabmakerBot(logger)\n dp = DjangoTelegramBot.dispatcher\n dp.add_handler(CommandHandler('start', telegram_bot.start_handler))\n dp.add_handler(CommandHandler('language', telegram_bot.language_handler))\n dp.add_handler(CommandHandler('motion', telegram_bot.motion_handler))\n dp.add_handler(CallbackQueryHandler(telegram_bot.callback_query_handler))\n dp.add_error_handler(telegram_bot.error_handler)\n","repo_name":"apanasenko/tabmaker","sub_path":"apps/tournament/telegrambot.py","file_name":"telegrambot.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} 
+{"seq_id":"71270948105","text":"from flask import jsonify\nfrom dao.request import RequestDAO\nfrom dao.customer import CustomerDAO\nfrom dao.requestCategory import RequestCategoryDAO\n\nclass RequestHandler:\n\n def build_request_dict(self, row, resources):\n result = {}\n result['request_id'] = row[0]\n result['customer_id'] = row[1]\n result['request_title'] = row[2]\n result['request_date'] = row[3]\n result['request_description'] = row[4]\n result['request_status'] = row[5]\n result['resources'] = resources\n return result\n\n def build_request_attributes(self, request_id, customer_id, request_title, request_date, request_description, request_status, resources):\n result = {}\n result['request_id'] = request_id\n result['customer_id'] = customer_id \n result['request_title'] = request_title\n result['request_date'] = request_date\n result['request_description'] = request_description\n result['request_status'] = request_status\n result['resources'] = resources\n return result\n\n def build_resources_dict(self, row):\n result = {}\n result['category_id'] = row[0]\n result['category_name'] = row[1]\n result['request_quantity'] = row[2]\n return result\n\n def fixDict(self, request_list):\n result_list = []\n resources_list = []\n index = 1\n for row in request_list:\n if index < len(request_list) and row[0] == request_list[index][0]:\n resources_list.append(row[6:])\n else:\n resources_list.append(row[6:])\n result = self.build_request_dict(row[:6], self.createResourceDict(resources_list))\n result_list.append(result)\n resources_list.clear()\n index += 1\n return result_list\n\n def createResourceDict(self, resources_list):\n result_list = []\n for row in resources_list:\n result = self.build_resources_dict(row)\n result_list.append(result)\n return result_list\n \n def getAllRequests(self):\n dao = RequestDAO()\n request_list = dao.getAllRequests()\n result_list = self.fixDict(request_list)\n return jsonify(Requests = result_list)\n\n def getRequestById(self, request_id):\n dao = RequestDAO()\n row = dao.getRequestById(request_id)\n if not row:\n return jsonify(Error = \"Request Not Found\"), 404\n else:\n request = self.fixDict(row)\n return jsonify(Request = request)\n\n def getRequestsByCustomerId(self, customer_id):\n customer_dao = CustomerDAO()\n if not customer_dao.getCustomerById(customer_id):\n return jsonify(Error = \"Customer not found.\"), 404\n else:\n request_list = []\n request_dao = RequestDAO()\n request_list = request_dao.getRequestsByCustomerId(customer_id)\n result_list = self.fixDict(request_list)\n return jsonify(Requests = result_list)\n\n def searchRequests(self, args):\n request_title = args.get(\"request_title\")\n request_status = args.get(\"request_status\")\n category_name = args.get(\"category_name\")\n dao = RequestDAO()\n request_list = []\n if (len(args) == 1) and request_title:\n request_list = dao.getRequestsByTitle(request_title)\n elif (len(args) == 1) and request_status:\n request_list = dao.getRequestsByStatus(request_status)\n elif (len(args) == 1) and category_name:\n request_list = dao.getRequestsByCategoryName(category_name)\n else:\n return jsonify(Error = \"Malformed query string\"), 400\n result_list = self.fixDict(request_list)\n return jsonify(Requests = result_list)\n\n def insertRequest(self, json):\n customer_id = json[\"customer_id\"]\n request_title = json[\"request_title\"]\n request_date = json[\"request_date\"]\n request_description = json[\"request_description\"]\n request_status = \"Pending\"\n resources = 
json[\"resources\"]\n\n if customer_id and request_title and request_date and request_status and request_description and resources:\n request_dao = RequestDAO()\n request_category_dao = RequestCategoryDAO()\n request_id = request_dao.insert(customer_id, request_title, request_date, request_description, request_status)\n for item in resources:\n request_category_dao.insert(request_id, item[\"category_id\"], item[\"request_quantity\"])\n result = self.build_request_attributes(request_id, customer_id, request_title, request_date, request_description, request_status, resources)\n return jsonify(Request = result), 201\n else:\n return jsonify(Error = \"Unexpected attributes in post request\"), 400\n\n def updateRequest(self, request_id, json):\n request_dao = RequestDAO()\n if not request_dao.getRequestById(request_id):\n return jsonify(Error = \"Request not found.\"), 404\n else:\n customer_id = json[\"customer_id\"]\n request_title = json[\"request_title\"]\n request_date = json[\"request_date\"]\n request_description = json[\"request_description\"]\n request_status = json['request_status']\n resources = json[\"resources\"]\n\n if customer_id and request_title and request_date and request_status and request_description and resources:\n request_dao = RequestDAO()\n request_category_dao = RequestCategoryDAO()\n request_id = request_dao.update(request_id, customer_id, request_title, request_date, request_description, request_status)\n for item in resources:\n request_category_dao.update(request_id, item[\"category_id\"], item[\"request_quantity\"])\n result = self.build_request_attributes(request_id, customer_id, request_title, request_date, request_description, request_status, resources)\n return jsonify(Request = result), 200\n else:\n return jsonify(Error = \"Unexpected attributes in update request\"), 400\n\n def deleteRequest(self, request_id):\n request_dao = RequestDAO()\n request_category_dao = RequestCategoryDAO()\n if not request_dao.getRequestById(request_id):\n return jsonify(Error = \"Request not found.\"), 404\n else:\n request_category_dao.delete(request_id)\n request_dao.delete(request_id)\n return jsonify(DeleteStatus = \"OK\"), 200","repo_name":"Fernando1929/DisasterResources","sub_path":"backend/handler/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":6625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16742615899","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib.gridspec import GridSpec as gs\nimport seaborn as sns\nfrom scipy.interpolate import CubicSpline\nfrom matplotlib.gridspec import GridSpec\n\nplt.style.use('seaborn')\n\n\n# load data\n# asu house data\ndata = pd.read_csv('../../data/sites/asu_house.csv', converters={'Time':pd.to_datetime})\ndata = data[data['CPM']=='Off'] # excluding cpm\nopen_data = data[data['LandDrain']=='Open']\nclosed_data = data[data['LandDrain']=='Closed']\n\n# simulation data\nsim = pd.read_csv('../../data/simulations/diurnal.csv')\n\n# sorting simulation data by cases\nopen = sim[(sim['ConstAe']=='No') & (sim['Pathway']=='Yes')]\nopen_const = sim[(sim['ConstAe']=='Yes') & (sim['Pathway']=='Yes')]\nclosed = sim[(sim['ConstAe']=='No') & (sim['Pathway']=='No')]\nclosed_const = sim[(sim['ConstAe']=='Yes') & (sim['Pathway']=='No')]\n\nfor df in (open_data, closed_data):\n r = df.resample('1D', on='Time', kind='timestamp')\n r = r['IndoorConcentration'].agg([np.mean, np.max, np.min, np.std])\n\n 
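# .agg() with raw functions names the result columns after each function's __name__\n # ('mean', 'amax', 'amin', 'std'), so the ratio below is each day's max-to-min concentration swing.\n 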
r['MaxDailyDelta'] = r['amax']/r['amin']\n r = r.replace([np.inf, -np.inf], np.nan).dropna()\n print(len(r) ,r['MaxDailyDelta'].median())\n\n\n\nmaxmin = lambda x: x['AttenuationGroundwater'].max()/x['AttenuationGroundwater'].min()\n\nprint( maxmin(open), maxmin(closed) )\n\ncolors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n\n#fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2,sharex=True,dpi=300)\n\nfig = plt.figure(constrained_layout=True, dpi=300)\n\ngs = GridSpec(3, 4, figure=fig)\n\n\nax1 = fig.add_subplot(gs[0, 0:2])\n# identical to ax1 = plt.subplot(gs.new_subplotspec((0, 0), colspan=3))\nax2 = fig.add_subplot(gs[0, 2:4])\nax3 = fig.add_subplot(gs[1:3, 0:3])\nax4 = fig.add_subplot(gs[1:3, 3:4])\n\n\n# pressure input\n# needs some smoothing\nx_smooth = np.linspace(0,23,200)\ny_smooth = CubicSpline(open['Time'], open['IndoorOutdoorPressure'])(x_smooth)\nax1.plot(x_smooth, y_smooth, color='k')\n\n\n# ae input\ny_smooth = CubicSpline(open['Time'], open['AirExchangeRate'])(x_smooth)\nax2.plot(x_smooth, y_smooth, label='Diurnal $A_e$')\n\nopen_const.plot(\n x='Time',\n y='AirExchangeRate',\n ax=ax2,\n label='Constant $A_e$',\n legend=False,\n)\n\n# open cases\nopen.plot(\n x='Time',\n y='AttenuationGroundwater',\n ax=ax3,\n legend=False,\n color=colors[0],\n logy=True,\n)\nopen_const.plot(\n x='Time',\n y='AttenuationGroundwater',\n ax=ax3,\n legend=False,\n color=colors[1],\n logy=True,\n)\n\n\n# closed cases\ny_smooth = CubicSpline(closed['Time'], closed['AttenuationGroundwater'])(x_smooth)\nax3.semilogy(x_smooth, y_smooth, label='Diurnal $A_e$',linestyle='--',color=colors[0])\nclosed_const.plot(\n x='Time',\n y='AttenuationGroundwater',\n ax=ax3,\n linestyle='--',\n legend=False,\n color=colors[1],\n logy=True,\n)\n\n\n#formatting\n\nax1.set(\n title='Indoor/outdoor pressure difference input',\n ylabel='$p_\\\\mathrm{in} \\\\; \\\\mathrm{(Pa)}$',\n)\n\nax2.set(\n title='Air exchange rate input',\n ylabel='$A_e \\\\; \\\\mathrm{(1/h)}$',\n)\n\nax3.set(\n title='Predicted attenuation factor over a \"typical\" day',\n xlabel='Time (h)',\n ylabel='$\\\\alpha_\\\\mathrm{gw}$',\n #yscale='log',\n ylim=[5e-6, 2e-4]\n)\n\nax4.axis('off')\n\n# legend stuff\nhandles, labels = [], []\n\nhandles.append(plt.Line2D((0,1),(0,1),color=colors[0]))\nhandles.append(plt.Line2D((0,1),(0,1),color=colors[1]))\nhandles.append(plt.Line2D((0,1),(0,1),color='k'))\nhandles.append(plt.Line2D((0,1),(0,1),color='k',linestyle='--'))\nlabels.append('Varying air exchange rate')\nlabels.append('Constant air exchange rate')\nlabels.append('Preferential pathway present')\nlabels.append('Preferential pathway absent')\n\n\nax4.legend(handles, labels, loc='center')\n\nplt.tight_layout()\nplt.savefig('../../figures/preferential_pathways/modeling_diurnal.pdf')\n#plt.show()\n","repo_name":"jstr0em/thesis","sub_path":"code/preferential_pathways/modeling_diurnal.py","file_name":"modeling_diurnal.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19579758982","text":"import sqlite3\nfrom sqlite3 import Error\n\ndef create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(f\"Error: {e}\")\n raise # Raising the exception to propagate the error\n\n return conn\n\ndef createUser(conn,name, discordId):\n crix = 500\n sql = ''' INSERT INTO user (name, crix, discordId)\n VALUES (?, ?, ?) 
'''\n cur = conn.cursor()\n cur.execute(sql, (name, crix, discordId))\n conn.commit()\n return cur.lastrowid\n\n\ndef createPlayer(conn,playerID,playerName,valueCrix,position,photoLink,userId):\n sql = ''' INSERT INTO players (playerID,playerName,valueCrix,position,photoLink,userId)\n VALUES (?, ?, ?, ?, ?, ?) '''\n cur = conn.cursor()\n cur.execute(sql, (playerID,playerName,valueCrix,position,photoLink,userId))\n conn.commit()\n return cur.lastrowid\n\ndef getUserId(conn,discordId):\n sql = ''' SELECT discordId FROM user WHERE discordId = ? '''\n cur = conn.cursor()\n cur.execute(sql, (discordId,))\n result = cur.fetchone()\n if result is None:\n return False\n else:\n return True\n\ndef getPlayerId(conn,playerID):\n sql = ''' SELECT playerId FROM players WHERE playerId = ? '''\n cur = conn.cursor()\n cur.execute(sql, (playerID,))\n result = cur.fetchone()\n if result is None:\n return False\n else:\n return True\n\ndef getAllPlayers(conn,userId):\n sql = ''' SELECT playerName FROM players WHERE userId = ? '''\n cur = conn.cursor()\n cur.execute(sql, (userId,))\n result = cur.fetchall()\n return [row[0] for row in result]\n\ndef getUserIdByPlayerName(conn,playerName):\n sql = ''' SELECT userId FROM players WHERE playerName = ? '''\n cur = conn.cursor()\n cur.execute(sql,(playerName,))\n result = cur.fetchone()\n return result\n\n# Crix\ndef getCrix(conn,discordId):\n sql = ''' SELECT crix FROM user WHERE discordId = ? '''\n cur = conn.cursor()\n cur.execute(sql, (discordId,))\n result = cur.fetchone()\n return result[0]\n\ndef setCrix(conn,crix,discordId):\n sql = ''' UPDATE user SET crix = ? WHERE discordId = ? '''\n cur = conn.cursor()\n cur.execute(sql, (crix,discordId))\n conn.commit()\n return cur.lastrowid\n\n\n","repo_name":"dre7djib/PackUltimateSquad-Bot-Discord","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9997564714","text":"import sys\nfrom PySide6 import QtWidgets, QtGui\n\n\nclass Main(QtWidgets.QMainWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.filename = \"\"\n self.init_ui()\n\n def init_ui(self):\n self.text = QtWidgets.QTextEdit(self)\n self.setCentralWidget(self.text)\n self.toolbar = self.init_toolbar()\n self.addToolBarBreak()\n self.formatbar = self.init_format_bar()\n self.init_menubar()\n self.statusbar = self.statusBar()\n self.setGeometry(100, 100, 1030, 800)\n self.setWindowTitle(\"Qt Editor\")\n\n def init_toolbar(self):\n self.new_action = QtGui.QAction(QtGui.QIcon(\"icons/new.png\"), \"New\", self)\n self.new_action.setStatusTip(\"Create a new document from scratch\")\n self.new_action.setShortcut(\"Ctrl+N\")\n self.new_action.triggered.connect(self.new_file)\n\n self.open_action = QtGui.QAction(QtGui.QIcon(\"icons/open.png\"), \"Open file\", self)\n self.open_action.setStatusTip(\"Open existing document\")\n self.open_action.setShortcut(\"Ctrl+O\")\n self.open_action.triggered.connect(self.open_file)\n\n self.save_action = QtGui.QAction(QtGui.QIcon(\"icons/save.png\"), \"Save\", self)\n self.save_action.setStatusTip(\"Save document\")\n self.save_action.setShortcut(\"Ctrl+S\")\n self.save_action.triggered.connect(self.save_file)\n\n toolbar = self.addToolBar(\"Options\")\n toolbar.addAction(self.new_action)\n toolbar.addAction(self.open_action)\n toolbar.addAction(self.save_action)\n toolbar.addSeparator()\n return toolbar\n\n def 
init_format_bar(self):\n formatbar = self.addToolBar(\"Format\")\n return formatbar\n\n def init_menubar(self):\n menubar = self.menuBar()\n file = menubar.addMenu(\"File\")\n edit = menubar.addMenu(\"Edit\")\n view = menubar.addMenu(\"View\")\n\n def new_file(self):\n spawn = Main(self)\n spawn.show()\n\n def save_file(self):\n print(\"Saving\")\n\n def open_file(self):\n print(\"Opening\")\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n main = Main()\n main.show()\n\n sys.exit(app.exec())","repo_name":"dppdoran/QtEditor","sub_path":"ed.py","file_name":"ed.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40080332367","text":"\n\nimport unittest\n\nimport torch\n\nfrom allennlp.common import Params\nfrom allennlp.data import Vocabulary, DatasetReader\nfrom allennlp.data.iterators import BasicIterator\nfrom allennlp.models import Model\n\n\nfrom kb.include_all import ModelArchiveFromParams\nfrom kb.kg_embedding import KGTupleReader\n\nfrom kb.knowbert import EntityDisambiguator, KnowBert\nfrom kb.knowbert import EntityLinkingWithCandidateMentions, SolderedKG\n\nARCHIVE_FILE = \"tests/fixtures/kg_embeddings/tucker_wordnet/model.tar.gz\"\n\n\ndef get_fixtures(include_gold_entities=False,\n include_lm_labels=True,\n include_contextual_embeddings=False):\n vocab = Vocabulary.from_params(Params({\n \"directory_path\": \"tests/fixtures/kg_embeddings/tucker_wordnet/vocabulary\",\n }))\n \n batch = {'next_sentence_label': torch.tensor([0, 1, 1]),\n 'tokens': {'tokens': torch.tensor([[16, 16, 11, 1, 1, 1, 17, 1, 1, 1],\n [16, 16, 1, 12, 1, 17, 1, 1, 1, 1],\n [16, 16, 1, 1, 17, 1, 13, 17, 17, 0]])},\n \n 'segment_ids': torch.tensor([[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]]),\n\n 'lm_label_ids': {'lm_labels': torch.tensor(\n [[0, 1, 0, 0, 13, 0, 1, 1, 13, 0],\n [0, 0, 1, 0, 0, 2, 1, 1, 13, 0],\n [0, 1, 1, 0, 1, 1, 0, 0, 0, 0]])},\n\n 'candidates': {'wordnet': {'candidate_entity_priors': torch.tensor(\n [[[1.0000, 0.0000, 0.0000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.0000, 0.0000, 0.0000]],\n \n [[0.2500, 0.2500, 0.2500, 0.2500, 0.0000],\n [0.2000, 0.2000, 0.2000, 0.2000, 0.2000]],\n \n [[1.0000, 0.0000, 0.0000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]),\n\n 'candidate_entities': {'ids': torch.tensor(\n [[[ 67, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0]],\n \n [[344, 349, 354, 122, 0],\n [101, 46, 445, 25, 28]],\n \n [[ 0, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0]]])},\n\n 'candidate_segment_ids': torch.tensor([[0, 1], [0, 1], [0, 0]]),\n\n 'candidate_spans': torch.tensor(\n [[[ 1, 1],\n [-1, -1]],\n \n [[ 1, 1],\n [ 4, 4]],\n \n [[-1, -1],\n [-1, -1]]])}}}\n\n if include_gold_entities:\n batch['gold_entities'] = {\n 'wordnet': {'ids': torch.tensor(\n [[[ 67],\n [0]],\n\n [[ 349],\n [ 46]],\n\n [[0],\n [0]]])}}\n\n if not include_lm_labels:\n del batch['next_sentence_label']\n del batch['lm_label_ids']\n\n if include_contextual_embeddings:\n batch_size, timesteps = batch['tokens']['tokens'].shape\n batch['contextual_embeddings'] = torch.rand(batch_size, timesteps, 12)\n batch['tokens_mask'] = batch['tokens']['tokens'] > 0\n del batch['tokens']\n\n return vocab, batch\n\n\nclass TestEntityDisambiguator(unittest.TestCase):\n def test_entity_disambiguator(self):\n vocab, batch = get_fixtures()\n\n contextual_embedding_dim = 12\n entity_embedding_dim = 11\n entity_embeddings = torch.nn.Embedding(500, 
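# the 500-row table leaves the last id (499) free to serve as the null entity, passed as null_entity_id below\n 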
entity_embedding_dim)\n\n disambiguator = EntityDisambiguator(\n contextual_embedding_dim,\n entity_embedding_dim,\n entity_embeddings,\n weighted_entity_threshold=0.3,\n null_entity_id=499\n )\n\n batch_size, timesteps = batch['tokens']['tokens'].shape\n contextual_embeddings = torch.rand(batch_size, timesteps, contextual_embedding_dim)\n\n output = disambiguator(\n contextual_embeddings,\n batch['tokens']['tokens'] > 0,\n batch['candidates']['wordnet']['candidate_spans'],\n batch['candidates']['wordnet']['candidate_entities']['ids'],\n batch['candidates']['wordnet']['candidate_entity_priors'],\n batch['candidates']['wordnet']['candidate_segment_ids']\n )\n\n _, num_spans, num_candidates = batch['candidates']['wordnet']['candidate_entities']['ids'].shape\n self.assertEqual(\n list(output['linking_scores'].shape), [batch_size, num_spans, num_candidates]\n )\n self.assertEqual(\n list(output['weighted_entity_embeddings'].shape), [batch_size, num_spans, entity_embedding_dim]\n )\n\n\nclass TestEntityLinkingWithCandidateMentions(unittest.TestCase):\n def _get_model(self, vocab):\n params = Params({\n \"type\": \"entity_linking_with_candidate_mentions\",\n \"kg_model\": {\n \"type\": \"from_archive\",\n \"archive_file\": ARCHIVE_FILE,\n },\n \"contextual_embedding_dim\": 12,\n })\n model = Model.from_params(params, vocab=vocab)\n model.unfreeze(None)\n return model\n\n\n def test_entity_linking(self):\n vocab, batch = get_fixtures(include_gold_entities=True,\n include_lm_labels=False,\n include_contextual_embeddings=True)\n model = self._get_model(vocab)\n\n output = model(\n batch['contextual_embeddings'],\n batch['tokens_mask'],\n gold_entities=batch['gold_entities']['wordnet'],\n **batch['candidates']['wordnet'])\n\n self.assertTrue('loss' in output)\n\n\nclass TestSolderedKG(unittest.TestCase):\n def test_soldered_kg(self):\n vocab, batch = get_fixtures(include_gold_entities=True,\n include_lm_labels=False,\n include_contextual_embeddings=True)\n\n params = Params({\n \"type\": \"soldered_kg\",\n \"entity_linker\": {\n \"type\": \"entity_linking_with_candidate_mentions\",\n \"kg_model\": {\n \"type\": \"from_archive\",\n \"archive_file\": ARCHIVE_FILE,\n },\n \"contextual_embedding_dim\": 12,\n },\n \"span_attention_config\": {\n \"hidden_size\": 24,\n \"num_hidden_layers\": 2,\n \"num_attention_heads\": 4,\n \"intermediate_size\": 55\n }\n })\n model = Model.from_params(params, vocab=vocab)\n model.unfreeze(None)\n\n output = model(\n batch['contextual_embeddings'],\n batch['tokens_mask'],\n **batch['candidates']['wordnet']\n )\n\n self.assertEqual(\n batch['contextual_embeddings'].shape,\n output['contextual_embeddings'].shape\n )\n\n\ndef get_knowbert(vocab, mode, include_wiki=False):\n params = {\n \"type\": \"knowbert\",\n \"mode\": mode,\n \"soldered_kgs\": {\n \"wordnet\": {\n \"type\": \"soldered_kg\",\n \"entity_linker\": {\n \"type\": \"entity_linking_with_candidate_mentions\",\n \"kg_model\": {\n \"type\": \"from_archive\",\n \"archive_file\": ARCHIVE_FILE,\n },\n \"contextual_embedding_dim\": 12,\n \"max_sequence_length\": 64,\n \"span_encoder_config\": {\n \"hidden_size\": 24,\n \"num_hidden_layers\": 1,\n \"num_attention_heads\": 3,\n \"intermediate_size\": 37\n },\n },\n \"span_attention_config\": {\n \"hidden_size\": 24,\n \"num_hidden_layers\": 2,\n \"num_attention_heads\": 4,\n \"intermediate_size\": 55\n }\n },\n },\n \"soldered_layers\": {\"wordnet\": 1},\n \"bert_model_name\": \"tests/fixtures/bert/bert_test_fixture.tar.gz\",\n }\n\n if include_wiki:\n 
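# second soldered KG: a Wikipedia entity linker attached at layer 0, with per-KG vocabulary namespaces set below\n 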
params[\"soldered_kgs\"][\"wiki\"] = {\n \"type\": \"soldered_kg\",\n \"entity_linker\": {\n \"type\": \"entity_linking_with_candidate_mentions\",\n \"namespace\": \"entity_wiki\",\n \"entity_embedding\": {\n \"num_embeddings\": 14,\n \"embedding_dim\": 24,\n },\n \"contextual_embedding_dim\": 12,\n \"max_sequence_length\": 64,\n \"span_encoder_config\": {\n \"hidden_size\": 24,\n \"num_hidden_layers\": 1,\n \"num_attention_heads\": 3,\n \"intermediate_size\": 37\n },\n },\n \"span_attention_config\": {\n \"hidden_size\": 24,\n \"num_hidden_layers\": 1,\n \"num_attention_heads\": 4,\n \"intermediate_size\": 55\n }\n }\n params[\"soldered_layers\"][\"wiki\"] = 0\n params[\"soldered_kgs\"][\"wordnet\"][\"entity_linker\"][\"namespace\"] = \"entity_wordnet\"\n\n model = Model.from_params(Params(params), vocab=vocab)\n return model\n\n\nclass TestKnowBert(unittest.TestCase):\n def test_knowbert_el(self):\n vocab, batch = get_fixtures(include_gold_entities=True)\n model = get_knowbert(vocab, None)\n output = model(**batch)\n loss = output['loss']\n loss.backward()\n self.assertTrue(True)\n\n def test_knowbert_el_pretrain(self):\n vocab, batch = get_fixtures(include_gold_entities=True)\n model = get_knowbert(vocab, 'entity_linking')\n output = model(**batch)\n loss = output['loss']\n loss.backward()\n self.assertTrue(True)\n\n def test_knowbert_simple(self):\n vocab, batch = get_fixtures()\n model = get_knowbert(vocab, None)\n output = model(**batch)\n loss = output['loss']\n loss.backward()\n self.assertTrue(True)\n\n\nclass TestKnowBertWikiWordnet(unittest.TestCase):\n def test_knowbert_wiki_wordnet(self):\n from kb.testing import get_bert_pretraining_reader_with_kg\n\n reader = get_bert_pretraining_reader_with_kg(\n mask_candidate_strategy='full_mask', masked_lm_prob=0.35, include_wiki=True)\n instances = reader.read(\"tests/fixtures/bert_pretraining/shard1.txt\")\n\n vocab = Vocabulary.from_params(Params({\n \"directory_path\": \"tests/fixtures/wordnet_wiki_vocab\",\n }))\n\n iterator = BasicIterator()\n iterator.index_with(vocab)\n\n for batch in iterator(instances, num_epochs=1, shuffle=False):\n pass\n\n # hack, incompatible fixtures...\n batch['tokens']['tokens'] = torch.min(batch['tokens']['tokens'], torch.tensor([17]))\n batch['lm_label_ids']['lm_labels'] = torch.min(batch['lm_label_ids']['lm_labels'], torch.tensor([17]))\n model = get_knowbert(vocab, None, include_wiki=True)\n output = model(**batch)\n loss = output['loss']\n loss.backward()\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"allenai/kb","sub_path":"tests/test_knowbert.py","file_name":"test_knowbert.py","file_ext":"py","file_size_in_byte":11240,"program_lang":"python","lang":"en","doc_type":"code","stars":355,"dataset":"github-code","pt":"81"} +{"seq_id":"13900609371","text":"import os\nimport numpy as np\nimport numpy.testing as npt\nimport unittest\nimport h5py\n\nclass TestRestart(unittest.TestCase):\n \"\"\"Recursively iterate over val*.value() till h5py.Datasets are found and validate those\"\"\"\n def checkRecords(self, valIs, valExpected, sortValues):\n if isinstance(valExpected, h5py.Dataset):\n self.assertEqual(valIs.shape, valExpected.shape, \"Record: \" + valExpected.name)\n if sortValues:\n valArrayIs = np.sort(valIs)\n valArrayExpected = np.sort(valExpected)\n else:\n valArrayIs = np.array(valIs)\n valArrayExpected = np.array(valExpected)\n npt.assert_allclose(valArrayIs, valArrayExpected, err_msg = \"Record: \" + valExpected.name)\n else:\n for ds1, ds2 in 
zip(valIs.values(), valExpected.values()):\n self.checkRecords(ds1, ds2, sortValues)\n\n def testRestart(self):\n self.longMessage = True\n checkpointPath = os.environ[\"TEST_SIMOUTPUT_PATH\"] + \"/checkpoints\"\n hdf5Path = checkpointPath + \"/hdf5_checkpoint_100.h5\"\n hdf5PathOrig = hdf5Path.replace(\"out_Restart\", \"out_Checkpoint\")\n \n with h5py.File(hdf5Path) as f1, h5py.File(hdf5PathOrig) as f2:\n data1 = list(f1[\"data\"].values())[0]\n data2 = list(f2[\"data\"].values())[0]\n fields1 = data1.get(\"fields\") or data1[\"meshes\"]\n fields2 = data2.get(\"fields\") or data2[\"meshes\"]\n # Number of fields\n self.assertEqual(len(fields1), len(fields2))\n self.checkRecords(fields1, fields2, False)\n \n particles1 = data1[\"particles\"]\n particles2 = data2[\"particles\"]\n # Number of species\n self.assertEqual(len(particles1), len(particles2))\n for species1, species2 in zip(particles1.values(), particles2.values()):\n # Number of species attributes\n self.assertEqual(len(species1), len(species2), \"Species: \" + species1.name)\n for prop1, prop2 in zip(species1.values(), species2.values()):\n self.assertEqual(len(prop1.attrs), len(prop2.attrs), \"Species attributes: \" + species1.name)\n for attr1, attr2 in zip(prop1.attrs.items(), prop2.attrs.items()):\n npt.assert_equal(attr1[1], attr2[1], err_msg = \"Attribute: \" + prop1.name + \"/\" + attr1[0])\n self.checkRecords(prop1, prop2, True)\n \n def testDetector(self):\n self.longMessage = True\n checkpointPath = os.environ[\"TEST_SIMOUTPUT_PATH\"] + \"/checkpoints\"\n hdf5Path = checkpointPath + \"/PhotonDetector_checkpoint_100.h5\"\n hdf5PathOrig = hdf5Path.replace(\"out_Restart\", \"out_Checkpoint\")\n \n with h5py.File(hdf5Path) as f1, h5py.File(hdf5PathOrig) as f2:\n data1 = list(f1[\"data\"].values())[0]\n data2 = list(f2[\"data\"].values())[0]\n fields1 = data1.get(\"fields\") or data1[\"meshes\"]\n fields2 = data2.get(\"fields\") or data2[\"meshes\"]\n # Number of fields\n self.assertEqual(len(fields1), len(fields2))\n self.checkRecords(fields1, fields2, False)\n \nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"ComputationalRadiationPhysics/parataxis","sub_path":"examples/hdf5Test/testData/validateRestart.py","file_name":"validateRestart.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"34665114885","text":"#----------------------------------------------------\n# Altitude histogram of Europe\n#----------------------------------------------------\n\n\n# PACKAGES\nfrom PIL import Image # Load the Python Imaging Library\nimport numpy as np # Load Numpy\nfrom matplotlib import pyplot as plt # Load pyplot (a Matplotlib submodule) and rename it plt\n\n# IMAGE PROCESSING\nim = Image.open('europe.tif') # PIL can read all common image formats\nNx, Ny = im.size # Reduce the resolution of the image\nim = im.resize((Nx//5, Ny//5), Image.ANTIALIAS)\nZ = np.array(im).astype(np.float64) # Convert the image to an array\nmax_altitude = 1000. 
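# (assumes pixel values scale linearly with altitude, with 0 at sea level)\n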
# Altitude maximale en metres, cette donnee est un peu douteuse, (a confirmer).\nZ = Z / Z.max() * max_altitude # On recale les altitudes \n\n\n\n\n\n\n\n# AFFICHAGE\nn_classes = 10 # Nombre de classes\nfig = plt.figure()\nfig.add_subplot(121)\nplt.title('Hisogramme') \nplt.ylabel('Surfaces $km^2$') # On specifie le label en y\nplt.hist(Z.flatten(), bins=n_classes) # Histogramme\nfig.add_subplot(122)\nplt.title('Hisogramme cumule') \nplt.hist(Z.flatten(), bins=n_classes, cumulative=True) # Histogramme cumule\nplt.xlabel('Altitudes $m$') # On specifie le label en x\n\n\nplt.show() # On affiche l'image\n\n","repo_name":"lcharleux/numerical_analysis","sub_path":"doc/Traitement_images/Example_code/europe_hist.py","file_name":"europe_hist.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2126202386","text":"from re import M\nfrom constants import market_data as md_const\nfrom typing import Dict, Iterable, Optional, Union\nfrom utils import http\n\n_DataType = Union[str, int, float]\n_PriceDict = Dict[str, Dict[str, _DataType]]\n\n_HARDCODED_PRICES = {\n 'gold': 1.,\n 'silver': 0.,\n}\n\n\nclass MarketClient(object):\n\n def __init__(self, region: str = 'North America West'):\n self.region = region\n self.cache = {}\n\n def get_price_data(self, item_ids: Iterable[str]) -> _PriceDict:\n '''Returns the raw market data from lostarkmarket.online.\n\n See https://documenter.getpostman.com/view/20821530/UyxbppKr\n for more API info.\n\n Args:\n item_ids: sequence of item ids to fetch.\n\n Returns:\n A dictionary containing the raw data from lostarkmarket.online for each\n item id specified. If an item does not exist, the dictionary will omit that\n item. For example, if item_ids is ['basic-oreha-fusion-material-2'], the\n output may be:\n\n {\n 'basic-oreha-fusion-material-2': {\n 'amount': 1,\n 'avgPrice': 8.9,\n 'category': 'Enhancement Material',\n 'cheapestRemaining': 356969,\n 'gameCode': '6861008',\n 'id': 'basic-oreha-fusion-material-2',\n 'image': 'https://www.lostarkmarket.online/assets/item_icons/basic-oreha-fusion-material.webp',\n 'lowPrice': 9,\n 'name': 'Basic Oreha Fusion Material',\n 'rarity': 2,\n 'recentPrice': 9,\n 'shortHistoric': {\n '2022-06-06': 9,\n '2022-06-07': 9,\n '2022-06-08': 9,\n '2022-06-09': 9,\n '2022-06-10': 9,\n '2022-06-11': 8.96,\n '2022-06-12': 8\n },\n 'subcategory': 'Honing Materials',\n 'updatedAt': '2022-06-12T19:58:29.631Z'\n }\n }\n\n Raises:\n requests.HTTPError: An error occurred retrieving data from the API.\n '''\n item_ids = [\n item_id for item_id in item_ids if item_id not in self.cache\n ]\n\n if not item_ids:\n return self.cache\n\n request_url = f'{md_const.MARKET_API}/export-market-live/{self.region}'\n raw_json = http.make_request('GET',\n request_url,\n params={'items': ','.join(item_ids)})\n self.cache.update({item['id']: item for item in raw_json})\n return self.cache\n\n def get_price_data_for_category(self, category: str) -> _PriceDict:\n request_url = f'{md_const.MARKET_API}/export-market-live/{self.region}'\n raw_json = http.make_request('GET',\n request_url,\n params={'category': category})\n self.cache.update({item['id']: item for item in raw_json})\n return self.cache\n\n def get_unit_price(self, item_id: str):\n if item_id in _HARDCODED_PRICES:\n return _HARDCODED_PRICES[item_id]\n\n if item_id in self.cache:\n price_json = self.cache[item_id]\n return price_json['lowPrice'] / price_json['amount']\n\n if 
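The histogram script above targets Python 2 and an old Pillow: under Python 3, `im.resize((Nx/5, Ny/5), ...)` fails because `/` returns floats, and `Image.ANTIALIAS` was removed in Pillow 10. A sketch of the equivalent under current versions, assuming `europe.tif` exists alongside the script; the 1000 m maximum is carried over from the original, which itself flags the value as doubtful:

```python
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt

im = Image.open('europe.tif')
nx, ny = im.size
im = im.resize((nx // 5, ny // 5), Image.LANCZOS)  # // keeps sizes integral; LANCZOS replaces ANTIALIAS

z = np.asarray(im, dtype=np.float64)
z = z / z.max() * 1000.0  # rescale to the assumed 1000 m maximum altitude

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.hist(z.ravel(), bins=10)
ax1.set_title('Histogramme')
ax1.set_ylabel('Surfaces $km^2$')
ax2.hist(z.ravel(), bins=10, cumulative=True)
ax2.set_title('Histogramme cumule')
ax2.set_xlabel('Altitudes $m$')
plt.show()
```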
item_id.endswith('-shard'):\n low_unit_price = float('inf')\n for suffix, amount in (('-pouch-s-1', 500), ('-pouch-m-2', 1000),\n ('-pouch-l-3', 1500)):\n pouch_id = item_id + suffix\n price_json = self.get_price_data([pouch_id])[pouch_id]\n unit_price = price_json['lowPrice'] / \\\n price_json['amount'] / amount\n if unit_price < low_unit_price:\n low_unit_price = unit_price\n low_price = price_json['lowPrice']\n low_amount = price_json['amount'] * amount\n low_id = pouch_id\n self.cache[item_id] = {\n 'id': low_id,\n 'lowPrice': low_price,\n 'amount': low_amount\n }\n else:\n self.get_price_data([item_id])\n\n price_json = self.cache[item_id]\n return price_json['lowPrice'] / price_json['amount']\n\n def item_gold_prices(self, item_ids: Iterable[str]) -> Dict[str, float]:\n '''\n Args:\n item_ids: sequence of item ids to fetch.\n\n Returns:\n A dictionary containing the current lowest market price from lostarkmarket.online \n for each item id specified. If an item does not exist, the dictionary will omit that\n item.\n '''\n prices = self.get_price_data(item_ids)\n return {\n item_id: float(prices[item_id]['lowPrice'])\n for item_id in item_ids\n }\n\n def gold_of_crystal(self) -> float:\n '''\n Request_Data wrapper to return only the lowest price for blue crystal\n Args:\n None\n\n Returns:\n A float representing the current lowest price for blue crystals\n '''\n price: float = self.item_gold_prices([md_const.BLUE_CRYSTAL_ID\n ])[md_const.BLUE_CRYSTAL_ID]\n return price\n\n def item_mari_prices(self):\n '''\n Converts LostArkMarket Mari shop prices from crystal to gold\n\n Args:\n None\n\n Returns:\n A dictionary containing items where each key is the LostArkMarket ID and the\n value will the respective gold cost calculated by using the current blue crystal\n price\n '''\n\n # Individual gold cost of each item in Mari's\n mari_gold_costs = {}\n for (item, (bc_price, bundle_no)) in md_const.MARI_ITEM_INFO.items():\n mari_gold_costs[item] = round(\n (self.gold_of_crystal() * bc_price / bundle_no), 2)\n\n return mari_gold_costs\n\n def profitable_mari_items(self) -> str:\n '''\n Displays all the profitable purchases available in Mari shop. 
If an item in mari\n shop has a lower gold than its market counterpart, it will display the item and\n percentage discount\n\n Args:\n None\n\n Returns:\n An ugly string dump\n '''\n mari_prices = self.item_mari_prices()\n output = \"\"\n for (item, gold_price) in self.item_gold_prices(\n md_const.MARI_ITEM_INFO).items():\n price_diff = mari_prices[item] - gold_price\n # Will do a pretty format later, want to test to see how this gets displayed on discord first\n if price_diff < 0:\n percent_diff = round(price_diff / gold_price * 100, 2)\n output += f\"\\n {item}: {mari_prices[item]}g (-{percent_diff}%)\"\n\n return output\n","repo_name":"oozio/lost-ark-guild-bot","sub_path":"utils/lost_ark/market_prices.py","file_name":"market_prices.py","file_ext":"py","file_size_in_byte":6828,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71689065225","text":"from logging import Logger\nfrom datetime import datetime\nfrom uuid import UUID, uuid5, NAMESPACE_X500\n\nfrom dds_loader.dds_models.hub_models import *\nfrom dds_loader.dds_models.link_models import *\nfrom dds_loader.dds_models.satellite_models import *\nfrom lib.kafka_connect.kafka_connectors import KafkaConsumer\nfrom lib.kafka_connect.kafka_connectors import KafkaProducer\nfrom dds_loader.repository.dds_repository import DdsRepository\n\nclass DdsMessageProcessor:\n def __init__(self,\n consumer: KafkaConsumer,\n producer: KafkaProducer,\n dds_repository: DdsRepository,\n logger: Logger,\n batch_size: int = 30) -> None:\n\n self._logger = logger\n self._consumer = consumer\n self._producer = producer\n self._dds_repository = dds_repository\n self._batch_size = batch_size\n\n def _insert_h_order(self,\n order_id: int,\n order_dt: str,\n h_order_pk: UUID) -> None:\n \n h_order = HOrder(order_id=order_id,\n order_dt=order_dt,\n h_order_pk=h_order_pk)\n\n self._dds_repository.insert(h_order)\n \n def _insert_s_order_cost(self,\n cost: str,\n payment: str,\n h_order_pk: UUID,\n hk_order_cost_pk: UUID) -> None:\n\n s_order_cost = SOrderCost(cost=cost,\n payment=payment,\n h_order_pk=h_order_pk,\n hk_order_cost_pk=hk_order_cost_pk)\n \n self._dds_repository.insert(s_order_cost)\n \n def _insert_s_order_status(self,\n status: str,\n h_order_pk: UUID,\n hk_order_status_pk: UUID) -> None:\n\n s_order_status = SOrderStatus(status=status,\n h_order_pk=h_order_pk,\n hk_order_status_pk=hk_order_status_pk)\n\n self._dds_repository.insert(s_order_status)\n\n\n def _insert_h_restaurant(self,\n restaurant_id: str,\n h_restaurant_pk: UUID) -> None:\n\n h_restaurant = HRestaurant(restaurant_id=restaurant_id,\n h_restaurant_pk=h_restaurant_pk)\n\n self._dds_repository.insert(h_restaurant)\n\n def _insert_s_restaurant_names(self,\n name: str,\n h_restaurant_pk: UUID,\n hk_restaurant_names_pk: UUID) -> None:\n\n s_restaurant_names = SRestaurantNames(name=name,\n h_restaurant_pk=h_restaurant_pk,\n hk_restaurant_names_pk=hk_restaurant_names_pk)\n\n self._dds_repository.insert(s_restaurant_names)\n\n\n def _insert_s_user_names(self,\n user_name: str,\n h_user_pk: UUID,\n user_login: str,\n hk_user_names_pk: UUID) -> None:\n\n s_user_names = SUserNames(username=user_name,\n h_user_pk=h_user_pk,\n userlogin=user_login,\n hk_user_names_pk=hk_user_names_pk)\n\n self._dds_repository.insert(s_user_names)\n\n\n def _insert_l_user_order(self,\n h_user_pk: UUID,\n h_order_pk: UUID,\n hk_order_user_pk: UUID) -> None:\n\n l_user_order = LOrderUser(h_user_pk=h_user_pk,\n h_order_pk=h_order_pk,\n 
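The shard branch of `get_unit_price` in the record above compares three pouch sizes and caches whichever gives the lowest gold-per-shard. The arithmetic is easy to get wrong (price divided by listing amount divided by shards per pouch), so here it is isolated; the pouch ids and prices are made up for illustration:

```python
def cheapest_unit_price(pouches):
    """Pick the lowest per-shard price across pouch listings.

    `pouches` maps pouch id -> (lowPrice, listing amount, shards per pouch).
    """
    best_id, best_unit = None, float('inf')
    for pouch_id, (low_price, amount, shards) in pouches.items():
        unit = low_price / amount / shards  # gold per single shard
        if unit < best_unit:
            best_id, best_unit = pouch_id, unit
    return best_id, best_unit


example = {
    'honor-shard-pouch-s-1': (35, 1, 500),   # 0.0700 gold/shard
    'honor-shard-pouch-m-2': (66, 1, 1000),  # 0.0660 gold/shard
    'honor-shard-pouch-l-3': (95, 1, 1500),  # 0.0633 gold/shard  <- cheapest
}
print(cheapest_unit_price(example))
```

Note that `get_unit_price` above then caches the winning pouch under the plain shard id, so later look-ups skip the comparison entirely.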
hk_order_user_pk=hk_order_user_pk)\n\n self._dds_repository.insert(l_user_order)\n\n def _insert_h_product(self,\n product_id: str,\n h_product_pk: UUID) -> None:\n\n h_product = HProduct(product_id=product_id,\n h_product_pk=h_product_pk)\n\n self._dds_repository.insert(h_product)\n\n def _insert_s_poduct_names(self,\n name: str,\n h_product_pk: UUID,\n hk_product_names_pk: UUID) -> None:\n\n s_poduct_names = SProductNames(name=name,\n h_product_pk=h_product_pk,\n hk_product_names_pk=hk_product_names_pk)\n\n self._dds_repository.insert(s_poduct_names)\n\n def _insert_h_category(self,\n category_name: str,\n h_category_pk: UUID) -> None:\n \n h_category = HCategory(h_category_pk=h_category_pk,\n category_name=category_name)\n \n self._dds_repository.insert(h_category)\n\n def _insert_l_product_category(self,\n h_product_pk: UUID,\n h_category_pk: UUID,\n hk_product_category_pk: UUID) -> None:\n\n l_product_category = LProductCategory(h_product_pk=h_product_pk,\n h_category_pk=h_category_pk,\n hk_product_category_pk=hk_product_category_pk)\n\n self._dds_repository.insert(l_product_category)\n\n def _insert_h_user(self,\n user_id: str,\n h_user_pk: UUID) -> None:\n\n h_user = HUser(user_id=user_id, h_user_pk=h_user_pk)\n\n self._dds_repository.insert(h_user)\n\n def _insert_l_product_restaurant(self,\n h_product_pk: UUID,\n h_restaurant_pk: UUID,\n hk_product_restaurant_pk: UUID) -> None:\n\n l_product_restaurant = LProductRestaurant(h_product_pk=h_product_pk,\n h_restaurant_pk=h_restaurant_pk,\n hk_product_restaurant_pk=hk_product_restaurant_pk)\n\n self._dds_repository.insert(l_product_restaurant)\n \n def _insert_l_order_product(self,\n h_order_pk: UUID,\n h_product_pk: UUID,\n hk_order_product_pk: UUID) -> None:\n\n l_order_product = LOrderProduct(h_order_pk=h_order_pk,\n h_product_pk=h_product_pk,\n hk_order_product_pk=hk_order_product_pk)\n\n self._dds_repository.insert(l_order_product)\n\n def __create_output_message(self, user_id: UUID) -> dict:\n\n lst_products = self._dds_repository.get_grouped_data(user_id, ['h_product_pk', 'name'])\n lst_categories = self._dds_repository.get_grouped_data(user_id, ['h_category_pk', 'category_name'])\n\n products = [{'id': str(item[1]), 'name': item[2], 'cnt': item[3]} for item in lst_products]\n categories = [{'id': str(item[1]), 'name': item[2], 'cnt': item[3]} for item in lst_categories]\n\n return {'user_id': str(user_id), 'products': products, 'categories': categories}\n\n def _message_processing(self, message: dict) -> UUID:\n\n payload = message['payload']\n products = payload['products']\n \n user = payload['user']\n restaurant = payload['restaurant']\n \n # filed for dds.h_order\n order_dt = payload['date']\n order_id = message['object_id']\n h_order_pk = uuid5(NAMESPACE_X500, str(order_id))\n\n # filed for dds.s_order_cost\n cost = payload['cost']\n payment = payload['payment']\n hk_order_cost_pk = uuid5(NAMESPACE_X500, str(order_id) + str(cost))\n\n # filed for dds.s_order_status\n status = payload['status']\n hk_order_status_pk = uuid5(NAMESPACE_X500, str(order_id) + status)\n \n # filed for dds.h_restaurant\n restaurant_id = restaurant['id']\n h_restaurant_pk = uuid5(NAMESPACE_X500, restaurant_id)\n\n # filed for dds.s_restaurant_names\n restaurant_name = restaurant['name']\n hk_restaurant_names_pk = uuid5(NAMESPACE_X500, restaurant_id + restaurant_name)\n \n # filed for dds.h_user\n user_id = user['id']\n h_user_pk = uuid5(NAMESPACE_X500, user_id)\n\n # filed for dds.s_user_names\n user_name = user['name']\n user_login = 
user['login']\n hk_user_names_pk = uuid5(NAMESPACE_X500, user_id + user_name + user_login)\n\n # filed for dds.l_user_order\n hk_order_user_pk = uuid5(NAMESPACE_X500, str(order_id) + user_id)\n\n self._insert_h_order(order_id=order_id,\n order_dt=order_dt,\n h_order_pk=h_order_pk)\n \n self._insert_s_order_cost(cost=cost,\n payment=payment,\n h_order_pk=h_order_pk,\n hk_order_cost_pk=hk_order_cost_pk)\n\n self._insert_s_order_status(status=status,\n h_order_pk=h_order_pk,\n hk_order_status_pk=hk_order_status_pk)\n\n self._insert_h_restaurant(restaurant_id=restaurant_id,\n h_restaurant_pk=h_restaurant_pk)\n\n self._insert_s_restaurant_names(name=restaurant_name,\n h_restaurant_pk=h_restaurant_pk,\n hk_restaurant_names_pk=hk_restaurant_names_pk)\n\n self._insert_h_user(user_id=user_id, h_user_pk=h_user_pk)\n\n self._insert_s_user_names(user_name=user_name,\n h_user_pk=h_user_pk,\n user_login=user_login,\n hk_user_names_pk=hk_user_names_pk)\n\n self._insert_l_user_order(h_user_pk=h_user_pk,\n h_order_pk=h_order_pk,\n hk_order_user_pk=hk_order_user_pk)\n\n for item in products:\n # filed for dds.h_product\n product_id = item['id']\n h_product_pk = uuid5(NAMESPACE_X500, product_id)\n\n # filed for dds.s_product_names\n product_name = item['name']\n hk_product_names_pk = uuid5(NAMESPACE_X500, product_id + product_name)\n\n # filed for dds.h_category\n category_name = item['category']\n h_category_pk = uuid5(NAMESPACE_X500, category_name)\n\n # filed for dds.l_product_category\n hk_product_category_pk = uuid5(NAMESPACE_X500, product_id + category_name)\n\n # filed for dds.l_product_restaurant\n hk_product_restaurant_pk = uuid5(NAMESPACE_X500, product_id + restaurant_id)\n\n # filed for dds.l_order_product\n hk_order_product_pk = uuid5(NAMESPACE_X500, str(order_id) + product_id)\n\n self._insert_h_product(product_id=product_id,\n h_product_pk=h_product_pk)\n\n self._insert_s_poduct_names(name=product_name,\n h_product_pk=h_product_pk,\n hk_product_names_pk=hk_product_names_pk)\n\n self._insert_h_category(h_category_pk=h_category_pk,\n category_name=category_name)\n\n self._insert_l_product_category(h_product_pk=h_product_pk,\n h_category_pk=h_category_pk,\n hk_product_category_pk=hk_product_category_pk)\n\n self._insert_l_product_restaurant(h_product_pk=h_product_pk,\n h_restaurant_pk=h_restaurant_pk,\n hk_product_restaurant_pk=hk_product_restaurant_pk)\n\n self._insert_l_order_product(h_order_pk=h_order_pk,\n h_product_pk=h_product_pk,\n hk_order_product_pk=hk_order_product_pk)\n\n return h_user_pk\n\n\n\n def run(self) -> None:\n self._logger.info(f\"{datetime.utcnow()}: START\")\n\n for _ in range(self._batch_size):\n\n message = self._consumer.consume()\n\n if not message:\n self._logger.info(f\"{datetime.utcnow()}: NO messages. 
Quitting.\")\n break\n\n h_user_pk = self._message_processing(message)\n output_message = self.__create_output_message(h_user_pk)\n\n self._producer.produce(output_message)\n\n self._logger.info(f\"{datetime.utcnow()}: FINISH\")\n","repo_name":"Ivivchik/de-project-sprint-9","sub_path":"solution/service_dds/src/dds_loader/dds_message_processor_job.py","file_name":"dds_message_processor_job.py","file_ext":"py","file_size_in_byte":12901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74242777865","text":"from pathlib import Path\n\nfrom PySide6.QtGui import QImage\nfrom PySide6.QtWidgets import QApplication\n\nfrom four_letter_blocks.__main__ import FourLetterBlocksWindow\n\n\n# noinspection PyUnusedLocal\ndef test_window_creation(qt_application: QApplication):\n window = FourLetterBlocksWindow()\n\n assert window.windowTitle() == 'Four-Letter Blocks'\n\n\n# noinspection PyUnusedLocal\ndef test_grid_changed(qt_application: QApplication):\n new_grid_text = 'ABC'\n expected_clues_text = 'ABC - '\n\n window = FourLetterBlocksWindow()\n\n window.ui.grid_text.setPlainText(new_grid_text)\n\n clues_text = window.ui.clues_text.toPlainText()\n\n assert clues_text == expected_clues_text\n\n\n# noinspection PyUnusedLocal\ndef test_format_text(qt_application: QApplication):\n new_grid_text = 'ABC'\n expected_formatted_text = '''\\\nUntitled\n\nABC\n\nABC -\n\n???'''\n\n window = FourLetterBlocksWindow()\n\n window.ui.grid_text.setPlainText(new_grid_text)\n\n formatted_text = window.format_text()\n\n assert formatted_text == expected_formatted_text\n\n\n# noinspection PyUnusedLocal\ndef test_save_pdf(qt_application: QApplication, tmp_path: Path):\n text_path = tmp_path / 'input.txt'\n text_path.write_text('''\nUntitled\n\nABCD\n\nABCD - Clue\n\nXXXX\n''')\n pdf_path = tmp_path / 'output.pdf'\n\n window = FourLetterBlocksWindow()\n\n window.open_file(text_path)\n\n window.export_pdf(pdf_path)\n\n assert pdf_path.exists()\n\n\n# noinspection PyUnusedLocal\ndef test_save_png(qt_application: QApplication, tmp_path: Path):\n text_path = tmp_path / 'input.txt'\n text_path.write_text('''\nUntitled\n\nABCD\n\nABCD -\n\nXXXX\n''')\n png_path = tmp_path / 'output.png'\n\n window = FourLetterBlocksWindow()\n\n window.open_file(text_path)\n\n window.export_png(png_path)\n\n image = QImage(str(png_path))\n\n assert image.size().toTuple() == (640, 120)\n\n\n# noinspection PyUnusedLocal\ndef test_save_md(qt_application: QApplication, tmp_path: Path):\n text_path = tmp_path / 'input.txt'\n text_path.write_text('''\nUntitled\n\nABCD\nE###\nF###\nG###\nH###\n\nABCD - Clue for abcd\nAEFGH - Clue for aefgh\n\nXXXX\nY###\nY###\nY###\nY###\n''')\n expected_markdown = '''\\\n## Untitled\nClue numbers are shuffled: 1 Across might not be the top left. 
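Every hub, satellite and link key in the processor above is a `uuid5` over the concatenated business keys, which is what makes the Data Vault load idempotent: replaying the same Kafka message regenerates byte-identical primary keys, so inserts can be de-duplicated downstream. A self-contained illustration with made-up ids:

```python
from uuid import NAMESPACE_X500, uuid5

order_id = 1042
user_id = 'u-7'

h_order_pk = uuid5(NAMESPACE_X500, str(order_id))
h_user_pk = uuid5(NAMESPACE_X500, user_id)
# link keys hash the concatenation of the two business keys
hk_order_user_pk = uuid5(NAMESPACE_X500, str(order_id) + user_id)

# deterministic: re-processing the same message yields the same keys
assert h_order_pk == uuid5(NAMESPACE_X500, str(order_id))
print(h_order_pk, hk_order_user_pk)
```

One caveat of plain concatenation: `str(order_id) + user_id` cannot distinguish `('104', '2u-7')` from `('1042', 'u-7')`, so adding a separator between the parts would make the keys collision-safe.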
2 pieces.\n\nAcross@@\n**1.** Clue for abcd@@\n\nDown@@\n**1.** Clue for aefgh@@\n'''.replace('@', ' ')\n md_path = tmp_path / 'output.md'\n\n window = FourLetterBlocksWindow()\n\n window.open_file(text_path)\n\n window.export_md(md_path)\n\n markdown = md_path.read_text()\n\n assert markdown == expected_markdown\n\n\n# noinspection PyUnusedLocal\ndef test_save_md_with_reference(qt_application: QApplication, tmp_path: Path):\n text_path = tmp_path / 'input.txt'\n text_path.write_text('''\nBasic Puzzle\n\nWORD\nI##A\nN##S\nEACH\n\nWORD - Part of a sentence\nEACH - One at a time\nWINE - Sour grapes\nDASH - Run between WORD and a neighbour\n\nAABB\nA##B\nA##B\nCCCC\n''')\n expected_markdown = '''\\\n## Basic Puzzle\nClue numbers are shuffled: 1 Across might not be the top left. 3 pieces.\n\nAcross@@\n**1.** Part of a sentence@@\n**3.** One at a time@@\n\nDown@@\n**1.** Sour grapes@@\n**2.** Run between 1 Across and a neighbour@@\n'''.replace('@', ' ')\n md_path = tmp_path / 'output.md'\n\n window = FourLetterBlocksWindow()\n\n window.open_file(text_path)\n\n window.export_md(md_path)\n\n markdown = md_path.read_text()\n\n assert markdown == expected_markdown\n","repo_name":"donkirkby/four-letter-blocks","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"31751847491","text":"from setuptools import setup, find_packages\n\nwith open('README.md', 'r') as handle:\n long_description = handle.read()\n\nsetup(\n name='diskloaf',\n version='0.2.2',\n description='A tool for creating a large file (a loaf) in order to wipe a hard disk '\n 'written by someone who knows nothing about security',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Andrew Blomenberg',\n author_email='andrewBlomen@gmail.com',\n url='https://github.com/Yook74/diskloaf',\n packages=['diskloaf'],\n install_requires=['progressbar2'],\n entry_points={\n 'console_scripts': ['diskloaf = diskloaf.loaf:main'],\n }\n)","repo_name":"Yook74/diskloaf","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2673633531","text":"from ruamel.yaml import YAML, YAMLError\n\nfrom . 
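The export tests above share one shape: write a fixture file into pytest's `tmp_path`, open it through the window, export, and assert on the artifact. Stripped of the Qt window, the pattern looks like this; `export_upper` is a hypothetical exporter used only to keep the sketch self-contained:

```python
from pathlib import Path


def export_upper(source: Path, target: Path) -> None:
    """Hypothetical exporter standing in for export_pdf/export_png/export_md."""
    target.write_text(source.read_text().upper())


def test_export_round_trip(tmp_path: Path):
    src = tmp_path / 'input.txt'
    src.write_text('abcd')
    out = tmp_path / 'output.txt'

    export_upper(src, out)

    assert out.read_text() == 'ABCD'
```

Because `tmp_path` is created fresh for each test, the PDF, PNG and Markdown assertions above never see each other's leftovers.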
import Action, ActionError\n\n\n# Configure YAML parsing to be safe by default\nyaml = YAML(typ='safe')\n\n\ndef ruby_object(loader, node):\n return loader.construct_mapping(node)\n\n\n# Register YAML constructors to convert Ruby objects from gem specification to dicts\nfor ruby_type in [\n '!ruby/object:Gem::Specification',\n '!ruby/object:Gem::Requirement',\n '!ruby/object:Gem::Dependency',\n '!ruby/object:Gem::Version'\n]:\n yaml.Constructor.add_constructor(ruby_type, ruby_object)\n\n\nclass Gem(Action):\n \"\"\"\n Provides the ability to manage packages using the Ruby gem package manager.\n\n :param name: the name of the package\n :param version: the version of the package to install\n :param state: the state that the package must be in\n :param executable: the gem executable to use\n :param options: additional command line options to pass to the gem command\n \"\"\"\n\n def __init__(\n self, name, version=None, state='present', executable=None, options=None, **kwargs\n ):\n self._version = version\n self._state = state\n self.name = name\n self.version = version\n self.state = state\n self.executable = executable\n self.options = options\n super().__init__(**kwargs)\n\n @property\n def version(self):\n return self._version\n\n @version.setter\n def version(self, version):\n if self.state == 'latest' and version:\n raise ValueError(\n \"you may not request 'state' to be 'latest' and provide a 'version' argument\"\n )\n self._version = version\n\n @property\n def state(self):\n return self._state\n\n @state.setter\n def state(self, state):\n if state not in ['present', 'latest', 'absent']:\n raise ValueError('state must be present, latest or absent')\n if state == 'latest' and self.version:\n raise ValueError(\n \"you may not request 'state' to be 'latest' and provide a 'version' argument\"\n )\n self._state = state\n\n def process(self):\n # Determine the gem executable\n executable = self.executable if self.executable else 'gem'\n\n # Obtain the specification of the requested package containing all installed versions\n # of the requested package\n gem_spec_proc = self.run(\n [executable, 'specification', '--all', self.name], stdout=True, ignore_fail=True\n )\n\n # Check whether the package is installed and whether it is outdated\n if gem_spec_proc.returncode != 0:\n gem_installed = False\n else:\n gem_installed = True\n\n # Determine if the package is installed and/or outdated\n try:\n gem_spec = yaml.load_all(gem_spec_proc.stdout)\n gem_versions = [p['version']['version'] for p in gem_spec]\n\n if self.state == 'latest':\n # Obtain the latest package version details\n gem_spec_remote_proc = self.run(\n [executable, 'specification', '--remote', self.name],\n stdout=True, ignore_fail=True\n )\n gem_spec_remote = yaml.load(gem_spec_remote_proc.stdout)\n gem_remote_version = gem_spec_remote['version']['version']\n\n # Determine if the latest package is already installed\n gem_outdated = gem_remote_version not in gem_versions\n except (YAMLError, KeyError):\n raise ActionError('unable to parse installed package listing')\n\n # Prepare any user provided options\n options_list = self.options if self.options else []\n\n # Install, upgrade or remove the package as requested\n if self.state == 'present':\n if self.version:\n if gem_installed and self.version in gem_versions:\n return self.ok()\n else:\n self.run(\n [executable, 'install', '--version', self.version] +\n options_list + [self.name],\n fail_error='unable to install the requested package version'\n )\n return self.changed()\n 
else:\n if gem_installed:\n return self.ok()\n else:\n self.run(\n [executable, 'install'] + options_list + [self.name],\n fail_error='unable to install the requested package'\n )\n return self.changed()\n\n elif self.state == 'latest':\n if gem_installed and not gem_outdated:\n return self.ok()\n else:\n self.run(\n [executable, 'install'] + options_list + [self.name],\n fail_error='unable to install the requested package'\n )\n return self.changed()\n\n else: # 'absent'\n if not gem_installed:\n return self.ok()\n elif self.version:\n if self.version not in gem_versions:\n return self.ok()\n\n self.run(\n [executable, 'uninstall', '--version', self.version, '--executables'] +\n options_list + [self.name],\n fail_error='unable to remove the requested package version'\n )\n return self.changed()\n else:\n self.run(\n [executable, 'uninstall', '--all', '--executables'] +\n options_list + [self.name],\n fail_error='unable to remove the requested package'\n )\n return self.changed()\n","repo_name":"wasdee/elite","sub_path":"elite/actions/gem.py","file_name":"gem.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27430505149","text":"from function import *\nimport json\nimport os\ncommand=os.system(\"aws workspaces describe-workspaces > inventaire.txt\")\ncommand=os.system(\"aws workspaces describe-workspace-directories > directory.txt\")\nwith open('inventaire.txt') as json_file:\n data = json.load(json_file)\n for p in data['Workspaces']:\n id = p['UserName']\n if id in open('ids.txt').read():\n #print(p['WorkspaceId'], p['DirectoryId'],p['UserName'],p['IpAddress'],p['State'],p['BundleId'], p['SubnetId'], p['ComputerName'])\n #print(p['WorkspaceId'], p['DirectoryId'],p['UserName'],p['BundleId'], p['State'])\n #print(p['UserName'],p['IpAddress'],p['ComputerName'],dir_code(p['DirectoryId']))\n print(p['UserName'],\" \\t\",p['IpAddress'],\" \\t\",p['ComputerName'],\" \\t\",dir_code(p['DirectoryId']),\" \\t\")\n #print(p['ComputerName'],end =\",\")\n\n","repo_name":"NAMMOUS/create-worksapces-with-python-script","sub_path":"get_workspaces_id.txt.py","file_name":"get_workspaces_id.txt.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22760646231","text":"from typing import Tuple\n\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\n\ndef load_data(file_path: str) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"\n Load data from csv file and return data and labels as tensors\n :param file_path: path to csv file\n :return: data and labels as tensors\n \"\"\"\n dataframe = pd.read_csv(file_path)\n data = dataframe.iloc[:, :-1].values\n labels = dataframe.iloc[:, -1].values.astype(bool)\n\n return data, labels\n\n\ndef normalise_data(\n test_data: tf.Tensor, train_data: tf.Tensor\n) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"\n Normalise data to values between 0 and 1 using min-max normalisation\n :param test_data: test data to normalise\n :param train_data: train data to normalise\n :return: normalised data\n \"\"\"\n min_value = tf.reduce_min(train_data)\n max_value = tf.reduce_max(train_data)\n\n train_data = (train_data - min_value) / (max_value - min_value)\n test_data = (test_data - min_value) / (max_value - min_value)\n\n train_data = tf.cast(train_data, dtype=tf.float32)\n test_data = tf.cast(test_data, dtype=tf.float32)\n\n return train_data, 
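The interesting part of the `Gem` action above is the YAML handling: `gem specification` emits Ruby-tagged YAML (`!ruby/object:Gem::Specification` and friends), which a safe loader refuses unless each tag is mapped to a constructor. The action registers a constructor that flattens every Ruby object to a plain dict; a minimal runnable version of that trick with ruamel:

```python
from ruamel.yaml import YAML

yaml = YAML(typ='safe')


def ruby_object(loader, node):
    # flatten any Ruby-tagged object into a plain dict, as the action does
    return loader.construct_mapping(node)


yaml.Constructor.add_constructor('!ruby/object:Gem::Version', ruby_object)

doc = yaml.load("--- !ruby/object:Gem::Version\nversion: 1.2.3\n")
print(doc['version'])  # -> 1.2.3
```

Without the registration, `yaml.load` raises a constructor error on the unknown tag, which is exactly the failure mode a safe loader is meant to surface.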
test_data\n\n\ndef separate_data_by_labels(\n data: tf.Tensor, labels: tf.Tensor\n) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"\n Separate data by labels into normal and anomalous data\n :param data: data to separate\n :param labels: labels to separate by\n :return: normal and anomalous data\n \"\"\"\n normal_data = data[labels]\n anomalous_data = data[~labels]\n\n return normal_data, anomalous_data\n\n\ndef preprocess_data(\n file_path: str, test_size: float = 0.2, random_state: int = 21\n) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:\n \"\"\"\n Preprocess data by loading, splitting, normalising and separating by labels\n :param file_path: the path to the csv file\n :param test_size: the size of the test set\n :param random_state: the random state to use for splitting the data\n :return: normal train data, anomalous train data, normal test data, anomalous test data\n \"\"\"\n # Load data\n data, labels = load_data(file_path)\n\n # Split data into train and test sets\n train_data, test_data, train_labels, test_labels = train_test_split(\n data, labels, test_size=test_size, random_state=random_state\n )\n\n # Normalise data\n normalised_train_data, normalised_test_data = normalise_data(\n train_data=train_data, test_data=test_data\n )\n\n # Separate data by labels\n normal_train_data, anomalous_train_data = separate_data_by_labels(\n normalised_train_data, train_labels\n )\n normal_test_data, anomalous_test_data = separate_data_by_labels(\n normalised_test_data, test_labels\n )\n\n return (\n normal_train_data,\n anomalous_train_data,\n normal_test_data,\n anomalous_test_data,\n )\n","repo_name":"WikGitHub/Anomaly-Detection","sub_path":"logic/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20880704768","text":"#!/usr/bin/env python\n# @File: tests/test_io.py\n# @Author: Niccolo' Bonacchi (@nbonacchi)\n# @Date: Tuesday, April 18th 2023, 10:29:52 am\n\nimport json\nimport tempfile\nimport unittest\nfrom pathlib import Path\nfrom genemede.io import create, read, update, backup\n\n\n# Implement tests for genemede.io functions\nclass TestIO(unittest.TestCase):\n def setUp(self):\n self.temp_dir = Path(tempfile.mkdtemp())\n self.test_data = [\n {\n \"guid\": \"test_guid\",\n \"datetime\": \"test_datetime\",\n \"name\": \"test_name\",\n \"description\": \"test_description\",\n \"mtype\": \"test_mtype\",\n \"resources\": [\n {\"guid\": \"test_guid\", \"datetime\": \"test_datetime\", \"name\": \"test_name\"}\n ],\n \"properties\": [\n {\"guid\": \"test_guid\", \"datetime\": \"test_datetime\", \"name\": \"test_name\"}\n ],\n \"custom\": [\n {\"guid\": \"test_guid\", \"datetime\": \"test_datetime\", \"name\": \"test_name\"}\n ],\n \"bids\": [{\"guid\": \"test_guid\", \"datetime\": \"test_datetime\", \"name\": \"test_name\"}],\n },\n ]\n\n def test_create(self):\n create(self.temp_dir / \"test_file.gnmd\", self.test_data)\n self.assertTrue(self.temp_dir.joinpath(\"test_file.gnmd\").exists())\n self.assertTrue(self.temp_dir.joinpath(\"test_file.gnmd\").is_file())\n\n def test_read(self):\n create(self.temp_dir / \"test_file.gnmd\", self.test_data)\n e = read(self.temp_dir / \"test_file.gnmd\")\n self.assertIsNotNone(e)\n self.assertEqual(len(e), len(self.test_data))\n self.assertEqual(e[0][\"guid\"], self.test_data[0][\"guid\"])\n self.assertEqual(e[0] == self.test_data[0], True)\n\n def test_backup(self):\n # Create file\n 
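`normalise_data` above deliberately computes `reduce_min`/`reduce_max` on the training split only and then applies those statistics to the test split, so no information about the test distribution leaks into preprocessing. A tiny numeric check of that behaviour:

```python
import tensorflow as tf

train = tf.constant([[0.0, 10.0], [5.0, 20.0]])
test = tf.constant([[2.5, 15.0]])

lo, hi = tf.reduce_min(train), tf.reduce_max(train)  # training statistics only
train_n = (train - lo) / (hi - lo)
test_n = (test - lo) / (hi - lo)

print(train_n.numpy())  # [[0.   0.5 ] [0.25 1.  ]]
print(test_n.numpy())   # [[0.125 0.75]] -> scaled with the *training* min/max
```

A test value outside the training range would land outside [0, 1], which is expected and usually harmless for the downstream model.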
create(self.temp_dir / \"test_file.gnmd\", self.test_data)\n # Backup created file\n backup(self.temp_dir / \"test_file.gnmd\")\n # List all files in temp_dir\n files = self.temp_dir.glob(\"*.gnmd\")\n self.assertTrue(len([x for x in files if \"test_file\" in x.name]) >= 2)\n","repo_name":"genemede/genemede","sub_path":"tests/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"1824222992","text":"from argparse import ArgumentParser\nfrom fload_freedb.stream.base import FreedbDocOperatePipeline\nimport re\nimport os\n\nfrom fload import Pipeline, base\nfrom fload_freedb.freedb import DocumentDotExist, FreedbClient, FreedbCollection\n\n\nclass FreedbDeleteDoc(FreedbDocOperatePipeline):\n exist_policy: str = 'skip'\n ignore_all = False\n ignore_doc_not_exist = False\n\n def add_arguments(self, parser:ArgumentParser):\n super().add_arguments(parser)\n parser.add_argument('--exist', choices=['skip', 'overwrite', 'merge'], default='skip')\n parser.add_argument('--ignore-all', '-g', action='store_true', default=False, \n help='ignore all exception')\n parser.add_argument('--ignore-doc-not-exist', action='store_true', default=False, \n help='ignore doc not exist exception.')\n\n\n def init(self, ops):\n super().init(ops)\n self.exist_policy = ops.exist\n self.ignore_all = ops.ignore_all\n self.ignore_doc_not_exist = ops.ignore_doc_not_exist\n\n def process(self, item):\n doc_id = item.get('id')\n try:\n self.col.delete_doc(doc_id)\n except DocumentDotExist:\n if self.ignore_doc_not_exist or self.ignore_all:\n return\n raise\n except Exception:\n if self.ignore_all:\n return\n raise\n","repo_name":"kevenli/fload-freedb","sub_path":"fload_freedb/stream/freedbdelete.py","file_name":"freedbdelete.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15980546968","text":"\"\"\"\nTests for the Bulk Enrollment views.\n\"\"\"\nimport json\n\nfrom courseware.tests.helpers import LoginEnrollmentTestCase\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.core import mail\nfrom django.urls import reverse\nfrom extended_api.serializers import BulkEnrollmentSerializer\nfrom microsite_configuration import microsite\nfrom rest_framework.test import APITestCase, force_authenticate\nfrom student.models import (\n ManualEnrollmentAudit,\n ENROLLED_TO_UNENROLLED,\n UNENROLLED_TO_ENROLLED,\n)\nfrom student.models import UserProfile, CourseEnrollment\nfrom student.tests.factories import UserFactory\nfrom xmodule.modulestore.django import modulestore\nfrom xmodule.modulestore.tests.django_utils import ModuleStoreTestCase\nfrom xmodule.modulestore.tests.factories import XMODULE_FACTORY_LOCK\n\nfrom lms.djangoapps.course_api.tests.mixins import CourseApiFactoryMixin\nfrom openedx.core.djangoapps.content.course_overviews.models import CourseOverview\nfrom openedx.core.djangoapps.site_configuration.models import SiteConfiguration\n\nUser = get_user_model()\ntest_config_multi_org = { # pylint: disable=invalid-name\n \"course_org_filter\": [\"FooOrg\", \"BarOrg\", \"FooBarOrg\"]\n}\n\n\ndef create_mock_site_config():\n site, __ = Site.objects.get_or_create(domain=\"example.com\", name=\"example.com\")\n site_configuration, created = 
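`FreedbDeleteDoc.process` above implements a small but reusable policy: the specific failure (`DocumentDotExist`) is swallowed when either `--ignore-doc-not-exist` or `--ignore-all` is set, while any other exception is swallowed only under `--ignore-all`. The same policy extracted into a standalone function, with a stub exception so the sketch runs without freedb:

```python
class DocumentDoesNotExist(Exception):
    """Stand-in for freedb's DocumentDotExist."""


def delete_doc(delete, doc_id, ignore_missing=False, ignore_all=False):
    """Call delete(doc_id), swallowing failures according to the flags."""
    try:
        delete(doc_id)
    except DocumentDoesNotExist:
        if not (ignore_missing or ignore_all):
            raise
    except Exception:
        if not ignore_all:
            raise


def missing(_doc_id):
    raise DocumentDoesNotExist


delete_doc(missing, 'doc-1', ignore_missing=True)  # swallowed
# delete_doc(missing, 'doc-1')  # without the flag this would re-raise
```

Ordering matters here: the specific `except` clause must precede the broad `except Exception`, otherwise the narrow flag could never fire.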
SiteConfiguration.objects.get_or_create(\n site=site,\n defaults={\"enabled\": True, \"values\": test_config_multi_org},\n )\n if not created:\n site_configuration.values = test_config_multi_org\n site_configuration.save()\n\n\nclass BulkEnrollmentTest(CourseApiFactoryMixin, ModuleStoreTestCase, LoginEnrollmentTestCase, APITestCase):\n \"\"\"\n Test the bulk enrollment endpoint\n \"\"\"\n shard = 4\n\n USERNAME = \"Bob\"\n EMAIL = \"bob@example.com\"\n PASSWORD = \"edx\"\n\n def setUp(self):\n \"\"\" Create a course and user, then log in. \"\"\"\n\n create_mock_site_config()\n\n XMODULE_FACTORY_LOCK.enable()\n\n if not CourseOverview.objects.all() and modulestore().get_courses():\n CourseOverview.load_from_module_store(modulestore().get_courses()[0].id)\n else:\n self.create_course()\n\n self.staff = User.objects.create(\n username=self.USERNAME,\n email=self.EMAIL,\n password=self.PASSWORD,\n is_staff=True,\n is_superuser=True,\n\n )\n UserProfile.objects.create(\n user=self.staff,\n org=\"FooOrg\"\n )\n self.client.force_authenticate(user=self.staff)\n self.url = reverse('extended_api:bulk_enroll')\n\n self.course = CourseOverview.objects.first()\n self.course_key = unicode(self.course.id)\n self.course.org = \"FooOrg\"\n self.course.save()\n self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')\n CourseEnrollment.enroll(\n self.enrolled_student,\n self.course.id\n )\n self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',\n last_name='Student')\n\n # Email URL values\n self.site_name = microsite.get_value(\n 'SITE_NAME',\n settings.SITE_NAME\n )\n self.about_path = '/courses/{}/about'.format(self.course.id)\n self.course_path = '/courses/{}/'.format(self.course.id)\n\n def request_bulk_enroll(self, data=None, use_json=False, **extra):\n \"\"\" Make an authenticated request to the bulk enrollment API. \"\"\"\n content_type = None\n if use_json:\n content_type = 'application/json'\n data = json.dumps(data)\n request = self.request_factory.post(self.url, data=data, content_type=content_type, **extra)\n force_authenticate(request, user=self.staff)\n response = self.view(request)\n response.render()\n return response\n\n def test_course_list_serializer(self):\n \"\"\"\n Test that the course serializer will work when passed a string or list.\n\n Internally, DRF passes the data into the value conversion method as a list instead of\n a string, so StringListField needs to work with both.\n \"\"\"\n for key in [self.course_key, [self.course_key]]:\n serializer = BulkEnrollmentSerializer(data={\n 'identifiers': 'percivaloctavius',\n 'action': 'enroll',\n 'email_students': False,\n 'courses': key,\n })\n self.assertTrue(serializer.is_valid())\n\n def test_non_staff(self):\n \"\"\" Test that non global staff users are forbidden from API use. \"\"\"\n self.staff.is_staff = False\n self.staff.save()\n # response = self.request_bulk_enroll()\n response = self.client.post(self.url, format='json')\n self.assertEqual(response.status_code, 403)\n\n def test_missing_params(self):\n \"\"\" Test the response when missing all query parameters. 
\"\"\"\n response = self.client.post(self.url, format='json')\n self.assertEqual(response.status_code, 400)\n\n def test_bad_action(self):\n \"\"\" Test the response given an invalid action \"\"\"\n data = {\n 'emails': self.enrolled_student.email,\n 'action': 'invalid-action',\n 'courses': self.course_key,\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, 400)\n\n def test_invalid_email(self):\n \"\"\" Test the response given an invalid email. \"\"\"\n data = {\n 'emails': 'percivaloctavius@',\n 'action': 'enroll',\n 'email_students': False,\n 'courses': self.course_key,\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, 200)\n\n # test the response data\n expected = {\n \"action\": \"enroll\",\n 'email_students': False,\n \"courses\": {\n self.course_key: {\n \"action\": \"enroll\",\n \"results\": [\n {\n \"identifier\": {\"email\": 'percivaloctavius@'},\n \"invalidIdentifier\": True,\n }\n ]\n }\n }\n }\n\n res_json = json.loads(response.content)\n self.assertEqual(res_json, expected)\n\n def test_invalid_username(self):\n \"\"\" Test the response given an invalid username. \"\"\"\n data = {\n 'usernames': 'percivaloctavius',\n 'action': 'enroll',\n 'email_students': False,\n 'courses': self.course_key,\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, 200)\n\n # test the response data\n expected = {\n \"action\": \"enroll\",\n 'email_students': False,\n \"courses\": {\n self.course_key: {\n \"action\": \"enroll\",\n \"results\": [\n {\n \"identifier\": {\"username\": 'percivaloctavius'},\n \"invalidIdentifier\": True,\n }\n ]\n }\n }\n }\n\n res_json = json.loads(response.content)\n self.assertEqual(res_json, expected)\n\n def test_invalid_user_id(self):\n \"\"\" Test the response given an invalid user_id. \"\"\"\n data = {\n 'user_ids': '-1',\n 'action': 'enroll',\n 'email_students': False,\n 'courses': self.course_key,\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, 200)\n\n # test the response data\n expected = {\n \"action\": \"enroll\",\n 'email_students': False,\n \"courses\": {\n self.course_key: {\n \"action\": \"enroll\",\n \"results\": [\n {\n \"identifier\": {\"user_id\": '-1'},\n \"invalidIdentifier\": True,\n }\n ]\n }\n }\n }\n\n res_json = json.loads(response.content)\n self.assertEqual(res_json, expected)\n\n def test_enroll_with_username(self):\n \"\"\" Test enrolling using usernames. 
\"\"\"\n data = {\n 'usernames': self.notenrolled_student.username,\n 'action': 'enroll',\n 'email_students': False,\n 'courses': self.course_key,\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, 200)\n\n # test the response data\n expected = {\n \"action\": \"enroll\",\n \"email_students\": False,\n \"courses\": {\n self.course_key: {\n \"action\": \"enroll\",\n \"results\": [\n {\n \"identifier\": {\"username\": self.notenrolled_student.username},\n \"before\": {\n \"enrollment\": False,\n \"user\": True,\n },\n \"after\": {\n \"enrollment\": True,\n \"user\": True,\n }\n }\n ]\n }\n }\n }\n manual_enrollments = ManualEnrollmentAudit.objects.all()\n self.assertEqual(manual_enrollments.count(), 1)\n self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)\n res_json = json.loads(response.content)\n self.assertEqual(res_json, expected)\n\n def test_enroll_with_email(self):\n \"\"\" Test enrolling using emails. \"\"\"\n data = {\n 'emails': self.notenrolled_student.email,\n 'action': 'enroll',\n 'email_students': False,\n 'courses': self.course_key,\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, 200)\n\n # test that the user is now enrolled\n user = User.objects.get(email=self.notenrolled_student.email)\n self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))\n\n # test the response data\n expected = {\n \"action\": \"enroll\",\n \"email_students\": False,\n \"courses\": {\n self.course_key: {\n \"action\": \"enroll\",\n \"results\": [\n {\n \"identifier\": {\"email\": self.notenrolled_student.email},\n \"before\": {\n \"enrollment\": False,\n \"user\": True,\n },\n \"after\": {\n \"enrollment\": True,\n \"user\": True,\n }\n }\n ]\n }\n }\n }\n\n manual_enrollments = ManualEnrollmentAudit.objects.all()\n self.assertEqual(manual_enrollments.count(), 1)\n self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)\n res_json = json.loads(response.content)\n self.assertEqual(res_json, expected)\n\n # Check the outbox\n self.assertEqual(len(mail.outbox), 0)\n\n def test_enroll_with_user_id(self):\n \"\"\" Test enrolling using user_ids. \"\"\"\n data = {\n 'user_ids': unicode(self.notenrolled_student.id),\n 'action': 'enroll',\n 'email_students': False,\n 'courses': self.course_key,\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, 200)\n\n # test that the user is now enrolled\n user = User.objects.get(email=self.notenrolled_student.email)\n self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))\n\n # test the response data\n expected = {\n \"action\": \"enroll\",\n \"email_students\": False,\n \"courses\": {\n self.course_key: {\n \"action\": \"enroll\",\n \"results\": [\n {\n \"identifier\": {\"user_id\": unicode(self.notenrolled_student.id)},\n \"before\": {\n \"enrollment\": False,\n \"user\": True,\n },\n \"after\": {\n \"enrollment\": True,\n \"user\": True,\n }\n }\n ]\n }\n }\n }\n\n manual_enrollments = ManualEnrollmentAudit.objects.all()\n self.assertEqual(manual_enrollments.count(), 1)\n self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)\n res_json = json.loads(response.content)\n self.assertEqual(res_json, expected)\n\n # Check the outbox\n self.assertEqual(len(mail.outbox), 0)\n\n def test_unenroll(self):\n \"\"\" Test unenrolling users using emails. 
\"\"\"\n data = {\n 'emails': self.enrolled_student.email,\n 'action': 'unenroll',\n 'email_students': False,\n 'courses': self.course_key,\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, 200)\n\n # test that the user is now unenrolled\n user = User.objects.get(email=self.enrolled_student.email)\n self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))\n\n # test the response data\n expected = {\n \"action\": \"unenroll\",\n \"email_students\": False,\n \"courses\": {\n self.course_key: {\n \"action\": \"unenroll\",\n \"results\": [\n {\n \"identifier\": {\"email\": self.enrolled_student.email},\n \"before\": {\n \"enrollment\": True,\n \"user\": True,\n },\n \"after\": {\n \"enrollment\": False,\n \"user\": True,\n }\n }\n ]\n }\n }\n\n }\n\n manual_enrollments = ManualEnrollmentAudit.objects.all()\n self.assertEqual(manual_enrollments.count(), 1)\n self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)\n res_json = json.loads(response.content)\n self.assertEqual(res_json, expected)\n\n # Check the outbox\n self.assertEqual(len(mail.outbox), 0)\n","repo_name":"BarryPaneer/platform","sub_path":"lms/djangoapps/extended_api/tests/test_bulk_enroll.py","file_name":"test_bulk_enroll.py","file_ext":"py","file_size_in_byte":15412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10671918049","text":"############### Blackjack Project #####################\n\n#Difficulty Normal 😎: Use all Hints below to complete the project.\n#Difficulty Hard 🤔: Use only Hints 1, 2, 3 to complete the project.\n#Difficulty Extra Hard 😭: Only use Hints 1 & 2 to complete the project.\n#Difficulty Expert 🤯: Only use Hint 1 to complete the project.\n\n############### Our Blackjack House Rules #####################\n\n## The deck is unlimited in size. \n## There are no jokers. 
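`test_course_list_serializer` above pins down a DRF quirk: the `courses` field must accept both a single string and a list, because DRF hands the raw value to the field's conversion method in either form. A framework-free sketch of the normalisation such a `StringListField` needs; the comma-splitting rule is an assumption for illustration, not necessarily the field's exact semantics:

```python
def as_list(value):
    """Normalise a value that may arrive as a string or a list of strings."""
    if isinstance(value, str):
        return [v for v in value.split(',') if v]
    return list(value)


assert as_list('course-v1:Foo+Bar+2024') == ['course-v1:Foo+Bar+2024']
assert as_list(['course-v1:Foo+Bar+2024']) == ['course-v1:Foo+Bar+2024']
assert as_list('a,b') == ['a', 'b']
```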
\n## The Jack/Queen/King all count as 10.\n## The the Ace can count as 11 or 1.\n## Use the following list as the deck of cards:\n## cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n## The cards in the list have equal probability of being drawn.\n## Cards are not removed from the deck as they are drawn.\n## The computer is the dealer.\n\n##################### Hints #####################\n\n#Hint 1: Go to this website and try out the Blackjack game: \n# https://games.washingtonpost.com/games/blackjack/\n#Then try out the completed Blackjack project here: \n# http://blackjack-final.appbrewery.repl.run\n\n#Hint 2: Read this breakdown of program requirements: \n# http://listmoz.com/view/6h34DJpvJBFVRlZfJvxF\n#Then try to create your own flowchart for the program.\n\n#Hint 3: Download and read this flow chart I've created: \n# https://drive.google.com/uc?export=download&id=1rDkiHCrhaf9eX7u7yjM1qwSuyEk-rPnt\n\n#Hint 4: Create a deal_card() function that uses the List below to *return* a random card.\n#11 is the Ace.\n#cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n\nfrom replit import clear\nfrom art import logo \n\ndef blackjack(): # Create function which will execute the game when called ----> so that i can restart the game when user wants to ( PROGRAM INDENTED UNDER THIS )\n print(logo)\n\n import random \n def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card_randomly_chosen = random.choice(cards) # Randomly choose a number from the list \n return card_randomly_chosen\n\n#Hint 5: Deal the user and computer 2 cards each using deal_card() and append().\n user_cards = []\n user_cards.append(deal_card()) # Randomly draw first card\n user_cards.append(deal_card()) # Randomly draw second card\n\n computer_cards = []\n computer_cards.append(deal_card()) # Randomly draw first card\n computer_cards.append(deal_card()) # Randomly draw second card\n\n#Hint 6: Create a function called calculate_score() that takes a List of cards as input \n#and returns the score. \n#Look up the sum() function to help you do this.\n def calculate_score(list_of_cards):\n score = sum(list_of_cards)\n if score > 21 and 11 in list_of_cards: # HINT 8\n list_of_cards.remove(11)\n list_of_cards.append(1)\n score = sum(list_of_cards)\n return score \n if len(list_of_cards) == 2 and score == 21: # HINT 7: Scenario where 2 cards and score = 21\n return 0\n else:\n return score \n\n user_score = calculate_score(list_of_cards = user_cards) # Score for user\n computer_score = calculate_score(list_of_cards = computer_cards) # Score for computer \n\n print(f\" Your cards: {user_cards}, current score: {user_score}\" )\n print(f\" Computer's first card: {computer_cards[0]}\")\n\n\n def draw_card_again(): # Create function that will add another card to the list for user\n user_cards.append(deal_card()) # Draw another card for user --> adds another card to the list\n \n def computer_draw_card_again(): # Create function which adds card to computer's list\n computer_cards.append(deal_card()) \n\n while computer_score < 17: # HINT 12\n computer_draw_card_again() \n computer_score = calculate_score(list_of_cards = computer_cards)\n\n def final_winner(): # Create function which will print whose the winner\n if user_score > 21 and computer_score > 21: # If both exceed 21\n print(\"It's a draw! You both exceeded 21!\")\n elif user_score < 22 and user_score > computer_score:\n print(\"Congrats, you won! You have more points! \")\n elif user_score == computer_score:\n print(\"It's a draw! 
\")\n elif user_score > 21 and user_score > computer_score:\n print(\"Awww, you lost! You have exceeded 21!\")\n elif computer_score > user_score and computer_score > 21:\n print('Congrats, you won! Computer has exceeded 21!')\n elif user_score < computer_score:\n print('Awww, you lost! Computer has more points!')\n\n restart_game = input(\"Do you want to restart the game? Type 'yes' to restart or 'no' if otherwise: \").lower() # This will be asked to the user whenever final_winner() is declared \n if restart_game == 'yes':\n clear()\n blackjack() # Call back the program (RESTART) \n elif restart_game == 'no':\n print(\"Ok, goodbye!\")\n\n \n if user_score == 0 or computer_score == 0: # HINT 9: Scenario where computer or user has blackjack\n if user_score > computer_score:\n print(f\"Your final hand: {user_cards}, final score: {user_score}\")\n print(f\"Computer's final hand: {computer_cards}, final score: {computer_score}\")\n print(\"Game over! Computer has gotten blackjack 21 points and won!\")\n restart_game = input(\"Do you want to restart the game? Type 'yes' to restart or 'no' if otherwise: \").lower() # This will be asked to the user whenever a winner is declared (from winning from blackjack 21 points)\n if restart_game == 'yes':\n clear()\n blackjack() # Call back the program (RESTART) \n elif restart_game == 'no':\n print(\"Ok, goodbye!\")\n elif user_score < computer_score:\n print(f\"Your final hand: {user_cards}, final score: {user_score}\")\n print(f\"Computer's final hand: {computer_cards}, final score: {computer_score}\")\n print(\"Game over! User has gotten blackjack 21 points and won!\")\n restart_game = input(\"Do you want to restart the game? Type 'yes' to restart or 'no' if otherwise: \").lower() # This will be asked to the user whenever a winner is declared (from winning from blackjack 21 points)\n if restart_game == 'yes':\n clear()\n blackjack() # Call back the program (RESTART) \n elif restart_game == 'no':\n print(\"Ok, goodbye!\")\n else:\n print(f\"Your final hand: {user_cards}, final score: {user_score}\")\n print(f\"Computer's final hand: {computer_cards}, final score: {computer_score}\")\n print(\"It's a draw! Both user and computer have gotten blackjack 21 points!\")\n restart_game = input(\"Do you want to restart the game? Type 'yes' to restart or 'no' if otherwise: \").lower() # This will be asked to the user whenever a winner is declared (from winning from blackjack 21 points)\n if restart_game == 'yes':\n clear()\n blackjack() # Call back the program (RESTART) \n elif restart_game == 'no':\n print(\"Ok, goodbye!\")\n else: # If game not ended, where nobody got blackjack or points > 21\n while user_score < 21: # while user score always below 21, ask whether to draw another card\n draw_another_card_for_user = input(\"Type 'y' to draw another card. 
Type 'n' to pass: \").lower() # HINT 10\n if draw_another_card_for_user == 'y': # want to draw while user score still below 21\n draw_card_again()\n user_score = calculate_score(list_of_cards = user_cards) # New score when draw another card\n print(f\"Your cards: {user_cards}, current score: {user_score}\")\n print(f\" Computer's first card: {computer_cards[0]}\")\n if user_score == 21: # Score AFTER DRAWING ADDITIONAL CARD\n print(f\"Your final hand: {user_cards}, final score: {user_score}\")\n print(f\"Computer's final hand: {computer_cards}, final score: {computer_score}\")\n final_winner()\n elif user_score > 21: # SCORE AFTER DRAWING ADDITIONAL CARD\n print(f\"Your final hand: {user_cards}, final score: {user_score}\")\n print(f\"Computer's final hand: {computer_cards}, final score: {computer_score}\")\n final_winner()\n elif draw_another_card_for_user == 'n': # elif never draw additional card at all\n print(f\"Your final hand: {user_cards}, final score: {user_score}\")\n print(f\"Computer's final hand: {computer_cards}, final score: {computer_score}\")\n final_winner()\n break # break out of while loop so program will end \n if draw_another_card_for_user == 'n': # don't want to draw while user score stll below 21\n print(f\"Your final hand: {user_cards}, final score: {user_score}\")\n print(f\"Computer's final hand: {computer_cards}, final score: {computer_score}\")\n final_winner()\n break # break out of while loop so program will end\n\n\nblackjack() # This will start the program running the first time \n\n\n\n#Hint 7: Inside calculate_score() check for a blackjack (a hand with only 2 cards: ace + 10) and return 0 instead of the actual score. 0 will represent a blackjack in our game.\n\n#Hint 8: Inside calculate_score() check for an 11 (ace). If the score is already over 21, remove the 11 and replace it with a 1. You might need to look up append() and remove().\n\n#Hint 9: Call calculate_score(). If the computer or the user has a blackjack (0) or if the user's score is over 21, then the game ends.\n\n#Hint 10: If the game has not ended, ask the user if they want to draw another card. If yes, then use the deal_card() function to add another card to the user_cards List. If no, then the game has ended.\n\n#Hint 11: The score will need to be rechecked with every new card drawn and the checks in Hint 9 need to be repeated until the game ends.\n\n#Hint 12: Once the user is done, it's time to let the computer play. The computer should keep drawing cards as long as it has a score less than 17.\n\n#Hint 13: Create a function called compare() and pass in the user_score and computer_score. If the computer and user both have the same score, then it's a draw. If the computer has a blackjack (0), then the user loses. If the user has a blackjack (0), then the user wins. If the user_score is over 21, then the user loses. If the computer_score is over 21, then the computer loses. If none of the above, then the player with the highest score wins.\n\n#Hint 14: Ask the user if they want to restart the game. 
If they answer yes, clear the console and start a new game of blackjack and show the logo from art.py.\n\n","repo_name":"Cyrus-Tan/Blackjack-game-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41695333044","text":"#!/usr/bin/python3\n\"\"\"\nGiven a string containing only three types of characters: '(', ')' and '*',\nwrite a function to check whether this string is valid. We define the validity\nof a string by these rules:\n\nAny left parenthesis '(' must have a corresponding right parenthesis ')'.\nAny right parenthesis ')' must have a corresponding left parenthesis '('.\nLeft parenthesis '(' must go before the corresponding right parenthesis ')'.\n'*' could be treated as a single right parenthesis ')' or a single left\nparenthesis '(' or an empty string.\nAn empty string is also valid.\n\nExample 1:\nInput: \"()\"\nOutput: True\nExample 2:\nInput: \"(*)\"\nOutput: True\nExample 3:\nInput: \"(*))\"\nOutput: True\nNote:\nThe string size will be in the range [1, 100].\n\"\"\"\n\n\nclass Solution:\n def checkValidString(self, s: str) -> bool:\n \"\"\"\n Brute force: dfs branching on \"*\".\n\n Better Solution:\n keep two stack: stak of \"(\" and stack of \"*\"\n \"\"\"\n stk_left = []\n stk_star = []\n for i, c in enumerate(s):\n if c == \"(\":\n stk_left.append(i)\n elif c == \"*\":\n stk_star.append(i)\n else:\n if stk_left:\n stk_left.pop()\n elif stk_star:\n stk_star.pop()\n else:\n return False\n\n while stk_left and stk_star and stk_star[-1] > stk_left[-1]:\n stk_star.pop()\n stk_left.pop()\n\n return not stk_left\n\n\nif __name__ == \"__main__\":\n assert Solution().checkValidString(\"(*))\") == True\n assert Solution().checkValidString(\"*(\") == False\n assert Solution().checkValidString(\"(*)\") == True\n","repo_name":"algorhythms/LeetCode","sub_path":"678 Valid Parenthesis String.py","file_name":"678 Valid Parenthesis String.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":843,"dataset":"github-code","pt":"81"} +{"seq_id":"69988842826","text":"from .circular_apodization import circular_apodization\nimport heeps.util.img_processing as impro\nimport proper\nimport numpy as np\nimport os.path\nfrom astropy.io import fits \n\ndef apodizer(wf, mode='RAVC', ravc_t=0.8, ravc_r=0.6, ngrid=1024, npupil=285,\n f_app_amp='', f_app_phase='', f_ravc_amp='', f_ravc_phase='',\n apo_misalign=None, onaxis=True, verbose=False, save_ring=False, **conf):\n\n ''' Create a wavefront object at the entrance pupil plane.\n The pupil is either loaded from a fits file, or created using\n pupil parameters.\n Can also select only one petal and mask the others.\n\n wf: WaveFront\n PROPER wavefront object\n mode: str\n HCI mode\n ravc_t: float\n RA transmittance\n ravc_r: float\n RA radius\n ngrid: int\n number of pixels of the wavefront array\n npupil: int\n number of pixels of the pupil\n f_app_amp: str\n f_app_phase: str \n apodizing phase plate files\n f_ravc_amp: str\n f_ravc_phase: str \n ring apodizer files (optional)\n apo_misalign: list of float\n apodizer misalignment\n\n '''\n\n # case 1: Ring Apodizer\n if 'RAVC' in mode and ravc_r > 0:\n\n # load apodizer from files if provided\n if os.path.isfile(f_ravc_amp) and os.path.isfile(f_ravc_phase):\n if verbose is True:\n print(' apply ring apodizer from files')\n # get amplitude and phase data\n RAVC_amp = fits.getdata(f_ravc_amp)\n RAVC_phase = 
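The scoring rules buried in the blackjack program's `calculate_score` (ace demotion and the 0-means-blackjack convention) are worth isolating, since the rest of the game only branches on the returned number. A compact, side-effect-free restatement of the same rules; unlike the original it does not mutate the hand when demoting an ace:

```python
def calculate_score(cards):
    """0 encodes blackjack (a two-card 21); aces drop from 11 to 1 on a bust."""
    if len(cards) == 2 and sum(cards) == 21:
        return 0
    score = sum(cards)
    aces = cards.count(11)
    while score > 21 and aces:
        score -= 10  # count one ace as 1 instead of 11
        aces -= 1
    return score


assert calculate_score([11, 10]) == 0      # blackjack
assert calculate_score([11, 9, 5]) == 15   # ace demoted: 1 + 9 + 5
assert calculate_score([10, 9, 5]) == 24   # bust
```

The original's remove/append approach also works, but it rewrites the hand list in place, so the printed cards no longer match what was actually dealt.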
fits.getdata(f_ravc_phase)\n # resize to npupil\n RAVC_amp = impro.resize_img(RAVC_amp, npupil)\n RAVC_phase = impro.resize_img(RAVC_phase, npupil)\n # pad with zeros to match PROPER gridsize\n RAVC_amp = impro.pad_img(RAVC_amp, ngrid)\n RAVC_phase = impro.pad_img(RAVC_phase, ngrid)\n # build complex apodizer\n ring = RAVC_amp*np.exp(1j*RAVC_phase)\n\n # else, define the apodizer as a ring (with % misalignments)\n else:\n # RAVC misalignments\n dx, dy = [0, 0] if apo_misalign is None else list(apo_misalign)[0:2]\n # create apodizer\n ring = circular_apodization(wf, ravc_r, 1, ravc_t, xc=dx, \n yc=dy, NORM=True)\n if save_ring is True:\n fits.writeto('apo_ring_r=%.4f_t=%.4f.fits'%(ravc_r, ravc_t),\n impro.crop_img(ring, npupil), overwrite=True)\n if verbose is True:\n print(' apply ring apodizer: ravc_t=%s, ravc_r=%s'\n %(round(ravc_t, 4), round(ravc_r, 4))\n + ', apo_misalign=%s'%apo_misalign)\n\n # multiply the loaded apodizer\n proper.prop_multiply(wf, ring)\n\n # case 2: Apodizing Phase Plate\n elif 'APP' in mode:\n # get amplitude and phase data\n if os.path.isfile(f_app_amp):\n if verbose is True:\n print(\" apply APP stop from '%s'\"%os.path.basename(f_app_amp))\n APP_amp = fits.getdata(f_app_amp)\n else:\n APP_amp = np.ones((npupil, npupil))\n if os.path.isfile(f_app_phase) and onaxis == True:\n if verbose is True:\n print(\" apply APP phase from '%s'\"%os.path.basename(f_app_phase))\n APP_phase = fits.getdata(f_app_phase)\n else:\n APP_phase = np.zeros((npupil, npupil))\n # resize to npupil\n APP_amp = impro.resize_img(APP_amp, npupil)\n APP_phase = impro.resize_img(APP_phase, npupil)\n # rotate for negative PSF\n if 'neg' in mode:\n APP_amp = np.rot90(APP_amp, 2)\n APP_phase = np.rot90(APP_phase, 2)\n # pad with zeros to match PROPER ngrid\n APP_amp = impro.pad_img(APP_amp, ngrid, 0)\n APP_phase = impro.pad_img(APP_phase, ngrid, 0)\n \n # multiply the loaded APP\n proper.prop_multiply(wf, APP_amp*np.exp(1j*APP_phase))\n\n return wf","repo_name":"vortex-exoplanet/HEEPS","sub_path":"heeps/optics/apodizer.py","file_name":"apodizer.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"36455089220","text":"from fileReaderWriter import *\nfrom ships import *\nimport button\nimport random\nfrom powerups import *\n\ndef instructionsScreen ():\n run = True\n while run:\n WIN.blit(BG, (0, 0))\n # Writes this text:\n information_label1 = gameFont5.render(\"Press W,A,S,D to move up, left, down, and right.\", 1, (white_color))\n WIN.blit(information_label1, (30, 30))\n\n #Writes this text:\n information_label2 = gameFont5.render(\"Press spacebar to fire your weapon and destroy alien ships\", 1, (white_color))\n WIN.blit(information_label2, (30, information_label1.get_height() + 40))\n\n # Writes this text:\n information_label3 = gameFont5.render(\"Occasional ships with powerups will be sent to you\", 1, (white_color))\n WIN.blit(information_label3, (30, information_label1.get_height() + information_label2.get_height() +60))\n\n information_label4 = gameFont5.render(\"Enemy Space Crafts:\", 1, (white_color))\n WIN.blit(information_label4, (30, information_label1.get_height() + information_label2.get_height() +information_label3.get_height() +120))\n\n WIN.blit(GREEN_SPACE_SHIP, (50,information_label1.get_height() + information_label2.get_height() +information_label3.get_height() + information_label4.get_height() + 150))\n WIN.blit(RED_SPACE_SHIP, (GREEN_SPACE_SHIP.get_width() + 
70,information_label1.get_height() + information_label2.get_height() +information_label3.get_height() + information_label4.get_height() + 150))\n WIN.blit(PINK_SPACE_SHIP, (GREEN_SPACE_SHIP.get_width() + RED_SPACE_SHIP.get_width() + 85,information_label1.get_height() + information_label2.get_height() +information_label3.get_height() + information_label4.get_height() + 150))\n\n information_label5 = gameFont5.render(\"Powerups:\", 1, (white_color))\n WIN.blit(information_label5, (30, information_label1.get_height() + information_label2.get_height() +information_label3.get_height() + information_label4.get_height() + GREEN_SPACE_SHIP.get_height() + RED_SPACE_SHIP.get_height() + PINK_SPACE_SHIP.get_height() - 80))\n\n WIN.blit(HEALTHPOWERUP, (80, information_label1.get_height() + information_label2.get_height() +information_label3.get_height() + information_label4.get_height() + GREEN_SPACE_SHIP.get_height() + RED_SPACE_SHIP.get_height() + PINK_SPACE_SHIP.get_height() + information_label5.get_height() - 50))\n information_label6 = gameFont5.render(\"+10 health\", 1, (white_color))\n WIN.blit(information_label6, (30, information_label1.get_height() + information_label2.get_height() + information_label3.get_height() + information_label4.get_height() + GREEN_SPACE_SHIP.get_height() + RED_SPACE_SHIP.get_height() + PINK_SPACE_SHIP.get_height() + HEALTHPOWERUP.get_height() + 20))\n\n WIN.blit(SPEEDPOWERUP, (25 + HEALTHPOWERUP.get_width() + 170, information_label1.get_height() + information_label2.get_height() +information_label3.get_height() + information_label4.get_height() + GREEN_SPACE_SHIP.get_height() + RED_SPACE_SHIP.get_height() + PINK_SPACE_SHIP.get_height() + information_label5.get_height() - 50))\n information_label7 = gameFont5.render(\"+2 speed\", 1, (white_color))\n WIN.blit(information_label7, (30 + information_label6.get_width() + 25, information_label1.get_height() + information_label2.get_height() + information_label3.get_height() + information_label4.get_height() + GREEN_SPACE_SHIP.get_height() + RED_SPACE_SHIP.get_height() + PINK_SPACE_SHIP.get_height() + HEALTHPOWERUP.get_height() + 20))\n\n WIN.blit(COOLDOWNPOWERUP, (HEALTHPOWERUP.get_width() + 120 + SPEEDPOWERUP.get_width() + 250, information_label1.get_height() + information_label2.get_height() +information_label3.get_height() + information_label4.get_height() + GREEN_SPACE_SHIP.get_height() + RED_SPACE_SHIP.get_height() + PINK_SPACE_SHIP.get_height() + information_label5.get_height() - 50))\n information_label8 = gameFont5.render(\"-15 laser cooldown\", 1, (white_color))\n WIN.blit(information_label8, (30 + information_label6.get_width() + 25 + information_label7.get_width() + 30, information_label1.get_height() + information_label2.get_height() + information_label3.get_height() + information_label4.get_height() + GREEN_SPACE_SHIP.get_height() + RED_SPACE_SHIP.get_height() + PINK_SPACE_SHIP.get_height() + HEALTHPOWERUP.get_height() + 20))\n\n information_label9 = gameFont5.render(\"Powerups last 15 seconds\", 1 ,(white_color))\n WIN.blit(information_label9, (30, HEIGHT - 50))\n\n\n #Main Menu Label\n main_menu_label = gameFont3.render(\"Main Menu\", 1, (white_color))\n WIN.blit (main_menu_label, (WIDTH/2-main_menu_label.get_width()/2, HEIGHT- 55))\n\n #Main Menu Button\n main_menu_picture = pygame.transform.scale(pygame.image.load('buttonPicture.png').convert_alpha(), (main_menu_label.get_width(), main_menu_label.get_height() - 10))\n main_menu_button = button.Button(WIDTH/2-main_menu_label.get_width()/2, HEIGHT- 55, 
main_menu_picture, 1)\n if main_menu_button.draw(WIN): main_menu()\n\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n pygame.quit()\n\ndef space_logs_screen ():\n run = True\n while run:\n WIN.blit(BG, (0, 0))\n\n if readFile('gameData.txt', 'enemiesKilled') < 750:\n #Prints this text:\n information_label1 = gameFont5.render(\"Logs encrypted, must collect data from alien spacecrafts\", 1, (white_color))\n WIN.blit(information_label1, (30, 30))\n else:\n information_label1 = gameFont5.render(\"\", 1, (white_color))\n WIN.blit(information_label1, (30, 1))\n\n #Writes this text:\n information_label2 = gameFont5.render(\"Year 2321. Earth Population: 12.1 billion:\", 1, (white_color))\n WIN.blit(information_label2, (30, information_label1.get_height() + 60))\n\n if readFile('gameData.txt', 'enemiesKilled') < 150:\n #Writes this text:\n information_label3 = gameFont5.render(f\"-------------- [{150 - readFile('gameData.txt', 'enemiesKilled')} Data samples required ]\", 1, (white_color))\n WIN.blit(information_label3, (30, information_label1.get_height() + information_label2.get_height() +90)) #ALL THIS ADDITION STUFF IS FOR SPACING OUT THE TEXT PROPERLY\n else:\n information_label3 = gameFont5.render(\"Initial Alien Contact\", 1, (white_color))\n WIN.blit(information_label3, (30, information_label1.get_height() + information_label2.get_height() +90))\n\n information_label4 = gameFont5.render(\"Year 2331. Earth population: 12.2 billion:\", 1, (white_color))\n WIN.blit(information_label4, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + 130))\n\n if readFile('gameData.txt', 'enemiesKilled') < 300:\n #Writes this text:\n information_label5 = gameFont5.render(f\"-------------- [{300 - readFile('gameData.txt', 'enemiesKilled')} Data samples required ]\", 1, (white_color))\n WIN.blit(information_label5, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + 170))\n else:\n information_label5 = gameFont5.render(\"Alien Trade\",1, (white_color))\n WIN.blit(information_label5, (30, information_label1.get_height() + information_label2.get_height() + information_label3.get_height() + information_label4.get_height() + 170))\n\n #Writes this text:\n information_label6 = gameFont5.render(\"Year 2345. 
Earth population: 12.5 billion:\", 1, (white_color))\n WIN.blit(information_label6, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + information_label5.get_height() + 210))\n\n if readFile('gameData.txt', 'enemiesKilled') < 450:\n #Writes this text:\n information_label7 = gameFont5.render(f\"-------------- [{450 - readFile('gameData.txt', 'enemiesKilled')} Data samples required ]\", 1, (white_color))\n WIN.blit(information_label7, (30, information_label1.get_height() + information_label2.get_height() + information_label3.get_height()+ information_label4.get_height() + information_label5.get_height() + information_label6 .get_height() + 250))\n else:\n #Writes this text:\n information_label7 = gameFont5.render(\"Human Intergalactic Travel\", 1, (white_color))\n WIN.blit(information_label7, (30, information_label1.get_height() + information_label2.get_height() + information_label3.get_height()+ information_label4.get_height() + information_label5.get_height() + information_label6 .get_height() + 250))\n\n information_label8 = gameFont5.render(\"Year 2351. Earth population 12.7 billion:\", 1, (white_color))\n WIN.blit(information_label8, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + information_label5.get_height() + information_label6.get_height() + information_label7.get_height() + 290))\n\n if readFile('gameData.txt', 'enemiesKilled') < 600:\n #Writes this text:\n information_label9 = gameFont5.render(f\"-------------- [{600 - readFile('gameData.txt', 'enemiesKilled')} Data samples required ]\", 1, (white_color))\n WIN.blit(information_label9, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + information_label5.get_height() + information_label6.get_height() + information_label7.get_height() + information_label8.get_height() + 320))\n else:\n information_label9 = gameFont5.render(\"Humans kill Alien Queen\", 1, (white_color))\n WIN.blit(information_label9, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + information_label5.get_height() + information_label6.get_height() + information_label7.get_height() + information_label8.get_height() + 320))\n\n information_label10 = gameFont5.render(\"Year 2354. 
Earth population 1 million:\", 1, (white_color))\n WIN.blit(information_label10, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + information_label5.get_height() + information_label6.get_height() + information_label7.get_height() + information_label8.get_height() + information_label9.get_height() + 350))\n\n if readFile('gameData.txt', 'enemiesKilled') < 750:\n #Writes this text:\n information_label11 = gameFont5.render(f\"-------------- [{750 - readFile('gameData.txt', 'enemiesKilled')} Data samples required ]\", 1, (white_color))\n WIN.blit(information_label11, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + information_label5.get_height() + information_label6.get_height() + information_label7.get_height() + information_label8.get_height() + information_label9.get_height() + information_label10.get_height() + 380))\n else:\n information_label11 = gameFont5.render(\"Aliens Massacre Humans\", 1, (white_color))\n WIN.blit(information_label11, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + information_label5.get_height() + information_label6.get_height() + information_label7.get_height() + information_label8.get_height() + information_label9.get_height() + information_label10.get_height() + 360))\n information_label12 = gameFont5.render(\"You are humanity's first and only hope. Go forth and fight\", 1, (white_color))\n WIN.blit(information_label12, (30, information_label1.get_height() + information_label2.get_height()+ information_label3.get_height() + information_label4.get_height() + information_label5.get_height() + information_label6.get_height() + information_label7.get_height() + information_label8.get_height() + information_label9.get_height() + information_label10.get_height() +information_label11.get_height()+ 370))\n\n #Main Menu Label\n main_menu_label = gameFont3.render(\"Main Menu\", 1, (white_color))\n WIN.blit (main_menu_label, (WIDTH-main_menu_label.get_width(), HEIGHT- 55))\n\n #Main Menu Button\n main_menu_picture = pygame.transform.scale(pygame.image.load('buttonPicture.png').convert_alpha(), (main_menu_label.get_width(), main_menu_label.get_height() - 10))\n main_menu_button = button.Button(WIDTH-main_menu_label.get_width(), HEIGHT- 55, main_menu_picture, 1)\n if main_menu_button.draw(WIN): main_menu()\n\n information_label13 = gameFont5.render (f\"Data Samples Collected: {readFile('gameData.txt', 'enemiesKilled')}\", 1, (white_color))\n WIN.blit(information_label13, (WIDTH - information_label13.get_width(), HEIGHT - 40 - main_menu_label.get_height()))\n\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n pygame.quit()\n\n#main function\ndef main ():\n run = True\n FPS = 60\n level = 0\n\n enemies = []\n wave_length = 5\n enemy_vel = 1\n\n COOLDOWNTIMER = 30 #THIS VALUE SHOULD BE SAME AS player.cooldownTimer!!!!!!!! 
(IN CLASS PLAYER)\n\n    powerups = []\n\n    if level < 3:\n        powerups_max = 3\n    elif level >= 3 and level < 6:\n        powerups_max = 4\n    elif level >= 6 and level < 9:\n        powerups_max = 5\n    elif level >= 9:\n        powerups_max = 6\n\n    powerup_vel = 2\n\n    healAmount = 10 #How much the health powerup heals for\n    speedAmount = 2 #How much speed the speed powerup gives\n    cooldownAmount = 15 #How much cooldown reduction the cooldown powerup gives\n\n    PLAYER_VEL = 9 #Non-changing player velocity\n\n    player_vel = PLAYER_VEL #Changing player velocity\n    laser_vel = 5\n\n    player = Player(int(WIDTH/2-PLAYER_SPACE_SHIP.get_width()/2), 630)\n\n    clock = pygame.time.Clock()\n\n    lost = False\n    lost_count = 0\n\n    i = 0\n\n    pygame.mixer.music.load('gameMusic.mp3')\n    pygame.mixer.music.play(-1)\n\n#Function for refreshing window\n    def redraw_window (i): #Animated background, the i represents the pixel of the background picture from which the background is going to be drawn. the i is decremented every loop\n        WIN.blit (BG, [0, i])\n        WIN.blit (BG, [0,-HEIGHT +i])\n\n        #draw text\n        health_label = gameFont3.render(f\"Health:{player.health}\", 1, (white_color)) #Renders health label\n        level_label = gameFont3.render(f\"Level:{level}\", 1, (white_color)) #Renders level label\n        points_label = gameFont3.render(f\"Points:{player.points}\", 1, (white_color)) #Renders points label\n\n        WIN.blit(health_label, (10, 10)) #Displays player health\n        WIN.blit(level_label, (WIDTH - level_label.get_width()-10, 10)) #Displays game level\n        WIN.blit(points_label, (WIDTH/2 - points_label.get_width()/2, 10)) #Displays the points\n\n        for enemy in enemies: #Draws/creates enemies\n            enemy.draw(WIN)\n\n        for powerup in powerups: #Draws/creates powerups\n            powerup.draw(WIN)\n\n        player.draw(WIN) #Creates/draws the player\n\n        if lost: #Loss message\n            lost_label = gameFont4.render (\"SHIP DESTROYED\", 1, (white_color))\n            WIN.blit(lost_label, (WIDTH/2 - lost_label.get_width()/2, 350))\n\n\n        pygame.display.update() #Updates display\n\n\n#Main loop\n    while run: #FPS clock speed\n        clock.tick(FPS)\n        redraw_window(i)\n        if i == 0:\n            i = HEIGHT\n        i -= 1\n\n        if player.health <=0: #Closes game when player loses\n            lost = True\n            lost_count += 1\n\n\n        if lost: #goes back to main menu when player loses\n            if lost_count > FPS * 1.5:\n                writeFile ('gameData.txt', 'enemiesKilled', readFile('gameData.txt', 'enemiesKilled') + player.points)\n                if player.points > readFile('gameData.txt', 'highscore'):\n                    writeFile('gameData.txt', 'highscore', player.points)\n                run = False\n                main_menu()\n            else:\n                continue\n\n        if len(enemies) == 0: #Spawns new enemies when all enemies are gone\n            level += 1\n            wave_length += 5\n\n            for _ in range(wave_length): #throwaway loop variable: reusing i here would clobber the background scroll offset\n                enemy = Enemy(random.randrange(50, WIDTH-100), random.randrange(-1200, -100), random.choice([\"green\", \"red\", \"pink\"]))\n                enemies.append(enemy)\n\n        if len(powerups) == 0: #Spawns new powerups when all powerups are gone\n\n            for j in range(powerups_max): #HEIGHT = 1000\n                powerup = Powerup(random.randrange(50, WIDTH-100), random.randrange(HEIGHT+1300,HEIGHT+1650), random.choice([HEALTHPOWERUP, SPEEDPOWERUP, COOLDOWNPOWERUP]))\n                powerups.append (powerup)\n\n        for event in pygame.event.get(): #Closes the game when the x is pressed\n            if event.type == pygame.QUIT:\n                quit()\n\n#Checks for key presses and moves player\n        keys = pygame.key.get_pressed()\n        if keys[pygame.K_a] and player.x - player_vel > 0: #left\n            player.x -= player_vel\n        if keys[pygame.K_d] and player.x + player_vel + player.get_width() < WIDTH: #right\n            player.x += player_vel\n        if 
keys[pygame.K_w] and player.y - player_vel > 0: #up\n player.y -= player_vel\n if keys[pygame.K_s] and player.y + player_vel + player.get_height() + 10 < HEIGHT: #down\n player.y += player_vel\n if keys[pygame.K_SPACE]:\n player.shoot()\n\n#Enemy movement and lasers\n for enemy in enemies[:]: #Enemy Loop\n enemy.move (enemy_vel)\n enemy.move_lasers(laser_vel, player)\n\n if random.randrange(0, 3*60) == 1: #Enemy random shoot\n enemy.shoot()\n\n if collide(enemy, player): #Checks for collision between enemy and player\n player.health -= 10\n enemies.remove(enemy)\n\n elif enemy.y + enemy.get_height() > HEIGHT: #Checks if enemy has gone off screen\n player.health -= 10\n enemies.remove(enemy)\n\n for powerup in powerups[:]: #Powerup loop\n powerup.move (powerup_vel)\n\n if collide(powerup, player): #Checks for collision between powerups and player\n if powerup.powerup_img == HEALTHPOWERUP:\n if player.health <= player.max_health - healAmount:\n player.health += healAmount\n\n elif player.health > player.max_health - healAmount:\n player.health = player.max_health\n\n powerups.remove (powerup)\n\n elif powerup.powerup_img == SPEEDPOWERUP: #Gives player speed powerup\n if player_vel == PLAYER_VEL:\n player_vel = player_vel + speedAmount\n player.speed_powerup_timer_counter = 1\n powerups.remove(powerup)\n\n elif powerup.powerup_img == COOLDOWNPOWERUP: #Gives player cooldown powerup\n if player.cooldownTimer == COOLDOWNTIMER:\n player.cooldownTimer = player.cooldownTimer - cooldownAmount\n player.cooldown_powerup_timer_counter = 1\n powerups.remove(powerup)\n elif player.cooldownTimer == COOLDOWNTIMER - cooldownAmount:\n powerups.remove(powerup)\n\n elif powerup.y - powerup.get_height() < -60: #Checks if the powerup is going offscreen\n powerups.remove(powerup)\n\n if player.speed_powerup_timer_counter >= player.speed_powerup_timer: #Timer for speed powerup\n player_vel = PLAYER_VEL\n elif player.speed_powerup_timer_counter > 0:\n player.speed_powerup_timer_counter += 1\n\n if player.cooldown_powerup_timer_counter >= player.cooldown_powerup_timer: #Timer for cooldown powerup\n player.cooldownTimer = 30\n elif player.cooldown_powerup_timer_counter > 0:\n player.cooldown_powerup_timer_counter += 1\n\n player.move_lasers(-laser_vel, enemies) #Moves player's laser\n\n#Function for main menu:\ndef main_menu():\n run = True\n while run:\n WIN.blit(BG, (0,0))\n\n #Title Label\n mainTitle_label = gameFont1.render(\"FIRST HOPE\", 1, (white_color))\n WIN.blit(mainTitle_label, (WIDTH / 2 - mainTitle_label.get_width() / 2, HEIGHT / 2 - mainTitle_label.get_height() / 2))\n\n #Highscore Label\n highscore_label = gameFont3.render(f\"HIGHSCORE: {readFile('gameData.txt', 'highscore')} \", 1, (white_color))\n WIN.blit (highscore_label, (WIDTH/2-highscore_label.get_width()/2, (HEIGHT/2-highscore_label.get_height()/2)+mainTitle_label.get_height() - 30))\n\n #Play Label\n play_label = gameFont3.render(\"PLAY\", 1, (white_color))\n WIN.blit (play_label, (WIDTH/2-play_label.get_width()/2, (HEIGHT/2-play_label.get_height()/2)+mainTitle_label.get_height()+highscore_label.get_height()))\n\n #Play Button\n play_button_picture = pygame.transform.scale(pygame.image.load('buttonPicture.png').convert_alpha(), (play_label.get_width(), play_label.get_height() - 10))\n play_button = button.Button((WIDTH/2-play_label.get_width()/2), ((HEIGHT/2-play_label.get_height()/2)+mainTitle_label.get_height()+highscore_label.get_height()), play_button_picture, 1)\n if play_button.draw(WIN): main()\n\n #Instructions Label\n 
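        # Same pattern as the Play entry above, and presumably for every menu entry: render the text label, scale 'buttonPicture.png' to the label's size,\n        # lay a button.Button over it at the same coordinates, and open the target screen when draw() reports a click.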
instructions_label = gameFont3.render(\"Instructions\", 1, (white_color))\n WIN.blit (instructions_label, (WIDTH/2-instructions_label.get_width()/2, (HEIGHT/2-instructions_label.get_height()/2)+mainTitle_label.get_height()+highscore_label.get_height() + play_label.get_height() + 10))\n\n #Instructions Button\n instructions_picture = pygame.transform.scale(pygame.image.load('buttonPicture.png').convert_alpha(), (instructions_label.get_width(), instructions_label.get_height() - 10))\n instructions_button = button.Button ((WIDTH/2 - instructions_label.get_width()/2), ((HEIGHT/2- instructions_label.get_height()/2)+ mainTitle_label.get_height()+highscore_label.get_height() + play_label.get_height()+ 10), instructions_picture, 1)\n if instructions_button.draw (WIN): instructionsScreen()\n\n #Space Logs Label\n space_logs_label = gameFont3.render(\"Space Logs\", 1, (white_color))\n WIN.blit (space_logs_label, (WIDTH/2-space_logs_label.get_width()/2, (HEIGHT/2-space_logs_label.get_height()/2)+mainTitle_label.get_height()+highscore_label.get_height() + play_label.get_height() + instructions_label.get_height() + 15))\n\n #Space Logs button\n space_logs_picture = pygame.transform.scale(pygame.image.load('buttonPicture.png').convert_alpha(), (space_logs_label.get_width(), space_logs_label.get_height() - 10))\n space_logs_button = button.Button ((WIDTH/2 - space_logs_label.get_width()/2), ((HEIGHT/2 - space_logs_label.get_height()/2) + mainTitle_label.get_height()+highscore_label.get_height() + play_label.get_height() + instructions_label.get_height()+ 18), space_logs_picture, 1)\n if space_logs_button.draw (WIN): space_logs_screen() #main () is a place holder for now\n\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n pygame.quit()","repo_name":"importTahsinZaman/FirstHope","sub_path":"gameScreens.py","file_name":"gameScreens.py","file_ext":"py","file_size_in_byte":24860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39556220752","text":"from flask import url_for, redirect, abort, request, render_template\n\nfrom CTFd.plugins.group_management.util import get_current_group\nfrom CTFd.utils.user import get_current_team, is_admin\n\n\ndef check_group_decorator(func):\n \"\"\" Checks if user has a group and if group is active. Only for non-API routes \"\"\"\n\n def wrapper(*args, **kwargs):\n current_team = get_current_team()\n if not current_team:\n return redirect(url_for(\"teams.private\", next=request.full_path))\n current_group = get_current_group(current_team)\n if not is_admin():\n if not current_group:\n return redirect(url_for('groups_choice', next=request.full_path))\n if not current_group.active:\n return (\n render_template(\n \"errors/403.html\",\n error=\"Your group is inactive. Ask your teacher for access permission\",\n ),\n 403,\n )\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef check_group_api_decorator(func):\n \"\"\" Checks if user has a group and if group is active. 
Only for API routes \"\"\"\n\n def wrapper(*args, **kwargs):\n current_group = get_current_group(get_current_team())\n if not is_admin():\n if not current_group:\n return redirect(url_for('groups_choice'))\n if not current_group.active:\n return abort(403)\n return func(*args, **kwargs)\n\n return wrapper\n","repo_name":"me-gusta/ctfd_plugins","sub_path":"group_management/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25902442490","text":"\nimport copy\nimport numpy as np\nfrom .Hand import Hand\nfrom .Board import Board\nfrom .Error import PokerError, InvalidArgumentError\n\n\n\nclass Showdown:\n \"\"\"\n Takes a hand and a board and characterize the strength of the combination\n by identifying its rank and the kickers (an array of 5 cards)\n Later on when we will want to compare hands: the one with the greatest rank wins\n and if hands have equal rank they are differentiated by comparing one by one\n the kickers until finding one that can differentiate the two players.\n The kicker with the highest value determining the winner of the confrontation.\n \"\"\"\n\n RANKS = ['high card', 'pair', 'two pair', 'three of a kind', 'straight', \\\n 'flush', 'full house', 'four of a kind', 'straight flush']\n\n\n def __init__(self, hand_cards, board_cards):\n if len(hand_cards) != 2:\n raise InvalidArgumentError('Trying to construct a Showdown object with\\\n a hand that is not of length 2.')\n if len(board_cards) != 5:\n raise InvalidArgumentError('Trying to construct a Showdown object with a\\\n board that is not of length 5.')\n if None in board_cards:\n raise InvalidArgumentError(\"Trying to construct a Showdown with undefined cards\\\n for board.\")\n\n self.__hand = hand_cards\n self.__board = board_cards\n\n self.__cards = sorted(hand_cards + board_cards)\n\n\n\n\n def __repr__(self):\n return self.__cards.__repr__()\n\n\n # ranks\n # 9: straight flush\n # 8: four of a kind\n # 7: full house\n # 6: flush\n # 5: straight\n # 4: three of a kind\n # 3: two pairs\n # 2: pair\n # 1: high card\n def characterize(self):\n \"\"\"\n Return a tuple with the first argument being the rank of the hand and the\n second one being a numpy array of length 5 representing the 5 kickers of\n the hand.\n \"\"\"\n value_array = np.array(list(map(lambda x: x.value, self.__cards)))\n suit_array = np.array(list(map(lambda x: x.suit, self.__cards)))\n value_bins = np.bincount(value_array, minlength=14)\n\n # Two lists used to keep highest value of successive cards and the count of successive cards\n straight__flush__current_value = [0 for i in range(4)]\n straight__flush__count = [0 for i in range(4)]\n\n # suit_count is only used to spot a flush later\n # Each entry counts the number of cards encountered in each suit\n suit_count = np.zeros(4, dtype=int)\n\n # go through showdown's cards to check straight flush and plain flush\n for card in self.__cards:\n suit = card.suit\n\n # checking straight flush\n current_value = card.value\n if straight__flush__current_value[suit-1] == 0:\n straight__flush__current_value[suit-1] = current_value\n straight__flush__count[suit-1] = 1\n elif current_value == straight__flush__current_value[suit-1] + 1:\n straight__flush__current_value[suit-1] = current_value\n straight__flush__count[suit-1] += 1\n else:\n straight__flush__current_value[suit-1] = current_value\n straight__flush__count[suit-1] = 1\n\n if straight__flush__count[suit-1] >= 
5:\n values_ans = np.zeros(5, dtype=int)\n values_ans[0] = straight__flush__current_value[suit-1]\n\n return np.append(np.array(9), values_ans)\n\n # suit_count is only used to spot a flush later\n suit_count[suit-1] += 1\n\n\n\n # intermediary step: check three of a kind and pairs\n four__of__a__kind__value = np.where(value_bins==4)[0]\n three_of_a_kind_value = np.where(value_bins==3)[0]\n pair_values = np.where(value_bins==2)[0]\n #print(\"value_bins\", value_bins)\n #print(\"pair_values\", pair_values)\n\n # check four of a kind\n if len(four__of__a__kind__value) != 0:\n values_ans = np.zeros(5, dtype=int)\n values_ans[0] = four__of__a__kind__value[0]\n\n # spotting the kicker\n if self.__cards[-1].value == four__of__a__kind__value[0]:\n kicker__value = self.__cards[-2].value\n else:\n kicker__value = self.__cards[-1].value\n values_ans[1] = kicker__value\n return np.append(np.array(8), values_ans)\n\n\n\n # check full house\n if len(three_of_a_kind_value) != 0 and len(pair_values) != 0:\n values_ans = np.zeros(5, dtype=int)\n values_ans[0] = three_of_a_kind_value[0]\n values_ans[1] = pair_values[-1]\n return np.append(np.array(7), values_ans)\n\n # check flush\n flush_finder = np.where(suit_count >= 5)[0]\n if len(flush_finder) != 0:\n suit_flush = flush_finder[0] + 1\n flush_values = value_array[np.where(suit_array == suit_flush)]\n values_ans = flush_values[-5:][::-1]\n return np.append(np.array(6), values_ans)\n\n\n\n # check straight\n if value_bins[-1] > 0:\n straight_value = 13\n straight_count = 1\n max_straight_count = 1\n max_straight_value = 0\n else:\n straight_value = 0\n straight_count = 0\n max_straight_count = 0\n max_straight_value = 0\n for i in range(1, value_bins.shape[0]):\n current_value = i\n if value_bins[i] > 0:\n if straight_value == 0:\n straight_value = current_value\n straight_count += 1\n elif current_value == straight_value % 13 + 1:\n straight_value = current_value\n straight_count += 1\n if straight_count > max_straight_count:\n max_straight_count = straight_count\n max_straight_value = straight_value\n else:\n straight_value = current_value\n straight_count = 1\n\n if max_straight_count >= 5:\n values_ans = np.zeros(5, dtype=int)\n values_ans[0] = max_straight_value\n return np.append(np.array(5), values_ans)\n\n # check three of a kind\n if len(three_of_a_kind_value) != 0:\n values_ans = np.zeros(5, dtype=int)\n values_ans[0] = three_of_a_kind_value[-1]\n\n first_kicker_found = False\n for index in range(13, -1, -1):\n if value_bins[index] != 0 and index != three_of_a_kind_value[-1]:\n if value_bins[index] > 1:\n values_ans[1] = index\n values_ans[2] = index\n return np.append(np.array(4), values_ans)\n if value_bins[index] == 1 and first_kicker_found == False:\n values_ans[1] = index\n first_kicker_found = True\n else:\n values_ans[2] = index\n return np.append(np.array(4), values_ans)\n\n # check two pair\n if len(pair_values) >= 2:\n values_ans = np.zeros(5, dtype=int)\n values_ans[0] = pair_values[-1]\n values_ans[1] = pair_values[-2]\n\n # spotting the last kicker\n for index in range(13, -1, -1):\n if (index in value_array\n and index != pair_values[-1]\n and index != pair_values[-2]):\n values_ans[2] = index\n break\n return np.append(np.array(3), values_ans)\n\n # check one pair\n if len(pair_values) == 1:\n values_ans = np.zeros(5, dtype=int)\n values_ans[0] = pair_values[0]\n\n # spotting the kickers\n # in this situation we know that there are only one pair amongst the\n # 7 cards of the showdown therefore no need to worry about the 
multiplicity\n            # of the card's value when looking for the kickers\n            nb_kickers_found = 0\n            index = 13\n            while index > 0 and nb_kickers_found < 3:\n                if (index in value_array\n                    and index != pair_values[0]):\n                    nb_kickers_found += 1\n                    values_ans[nb_kickers_found] = index\n                index -= 1\n            return np.append(np.array(2), values_ans)\n\n        return np.append(np.array(1), value_array[-5:][::-1])\n\n\n\n\n    def get_string_rank(self):\n        rank_array = self.characterize()\n        #print(\"rank_array.dtype\", rank_array.dtype)\n        #print(rank_array[0])\n        return self.RANKS[rank_array[0]-1]\n\n\n\n\n\n    def compress(self):\n        sort_hand = sorted(self.__hand)\n        sort_board = sorted(self.__board)\n        concat = copy.deepcopy(sort_hand + sort_board)\n\n        # suit_converter maps each original suit to a new suit id, assigned in order of first appearance\n        suit_converter = {1: None, 2: None, 3: None, 4: None}\n\n        count_new_suits = 1\n        for card in concat:\n            suit = card.suit\n            if suit_converter[suit] is None:\n                suit_converter[suit] = count_new_suits\n                card.suit = count_new_suits\n                count_new_suits += 1\n            else:\n                card.suit = suit_converter[suit]\n\n        return concat\n","repo_name":"adrienruault/pokerface","sub_path":"src/gameframework/Showdown.py","file_name":"Showdown.py","file_ext":"py","file_size_in_byte":9445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"3490578456","text":"from __future__ import annotations\n\nfrom collections import ChainMap\n\nimport abc\nimport asyncio\nimport sys\nimport typing\nimport weakref\nfrom functools import wraps\n\nfrom .type_vars import T, SimpleCoroutine, S\n\n\ndef make_async(func: typing.Callable[[T], SimpleCoroutine[S] | S]) -> typing.Callable[\n    [T], SimpleCoroutine[S]]:\n    \"\"\"\n    Decorator to turn a non-async function into a coroutine by running it in the default executor pool.\n    \"\"\"\n\n    if asyncio.iscoroutinefunction(func):\n        return typing.cast(typing.Callable[[T], SimpleCoroutine[S]], func)\n\n    @wraps(func)\n    async def _callback(*args):\n        loop = asyncio.get_running_loop()\n        result = await loop.run_in_executor(None, func, *args)\n\n        if isinstance(result, typing.Awaitable):\n            result = await result\n\n        return result\n\n    return _callback\n\n\nclass RegistryMeta(abc.ABCMeta):\n    \"\"\"\n    Set __unique_key_attr__ in created classes to some attribute of the class instance that is\n    unique to use this attribute as a key in the registry instead of the __default_key__.\n    If the __default_key__ attribute is not present in the instance, it will be copied from the class\n    on instance creation, and appended with the number of instances created before.\n\n    All types created by this metaclass will have a ``__registry_key__`` property, so that\n    the key for an instance (i.e. either default key or unique key) can be accessed easily.\n\n    Warning:\n\n        If the instances get garbage collected, they will not be available from the registry anymore\n\n    Example:\n\n        Here you can see that the registry only uses weak references, and instances that get garbage collected\n        are removed from the registry ::\n\n            >>> from codestare.async_utils import RegistryMeta\n            >>> class T(metaclass=RegistryMeta):\n            ...     __unique_key_attr__ = 'name'\n            ...     def __init__(self, name):\n            ...         self.name = name\n            ...     def __repr__(self):\n            ...         
return f\"{self.__class__.__name__}(name={self.name!r})\"\n ...\n >>> a = T('foo')\n >>> T.registry\n {'foo': T(name='foo')}\n >>> import gc\n >>> gc.collect()\n 0\n >>> T.registry\n {'foo': T(name='foo')}\n >>> a = T('bar')\n >>> T.registry\n {'foo': T(name='foo'), 'bar': T(name='bar')}\n >>> gc.collect()\n 0\n >>> T.registry\n {'bar': T(name='bar')}\n\n \"\"\"\n _wrap_marker = object()\n\n @staticmethod\n def _post_new(instance: typing.Any):\n cls: RegistryMeta = type(instance)\n\n # we set value in all parents, so they know a new instance is created\n # we know that the mappings are mutable, so it's ok to set the values.\n for mapping in cls.__created__.maps:\n mapping[instance.__registry_key__] = instance # type: ignore\n\n @property\n def registry(cls: typing.Type[T]) -> typing.Dict[typing.Any, T]:\n \"\"\"\n Mapping :math:`\\\\text{instance.__registry_key__} \\\\rightarrow instance`\n \"\"\"\n return {\n instance.__registry_key__: instance\n for instance in cls.__created__.maps[0].values()\n if instance is not None # only live references\n }\n\n @staticmethod\n def _wrap_new(__new__):\n @wraps(__new__)\n def wrapped(cls, *args, **kwargs):\n instance = __new__(cls)\n RegistryMeta._post_new(instance)\n return instance\n\n wrap_markers = getattr(wrapped, 'markers', set())\n wrap_markers.add(RegistryMeta._wrap_marker)\n wrapped.markers = wrap_markers\n\n return wrapped\n\n def __new__(mcs, name, bases, attrs):\n kls: RegistryMeta = super().__new__(mcs, name, bases, attrs)\n\n wrap_markers = getattr(kls.__new__, 'markers', set())\n if mcs._wrap_marker not in wrap_markers:\n kls.__new__ = mcs._wrap_new(kls.__new__)\n\n class_namespace = kls.__dict__\n\n if not hasattr(kls, '__created__'):\n kls.__created__ = ChainMap(weakref.WeakValueDictionary())\n else:\n parent = kls.__created__\n setattr(kls, '__created__', parent.new_child(weakref.WeakValueDictionary()))\n\n if '__default_key_value__' not in class_namespace:\n kls.__default_key_value__ = f\"{attrs['__module__']}.{attrs['__qualname__']}\"\n\n if not hasattr(kls, '__unique_key_attr__'):\n kls.__unique_key_attr__ = '__missing_key__'\n\n if '__registry_key__' not in class_namespace:\n kls.__registry_key__ = property(\n fget=(\n lambda instance: getattr(\n instance,\n instance.__unique_key_attr__,\n f\"{kls.__default_key_value__}_{id(instance)}\"\n )\n )\n )\n\n return kls\n\n\nclass Registry(object, metaclass=RegistryMeta):\n \"\"\"\n You can inherit from this class to implicitly use the :class:`RegistryMeta` metaclass\n\n Example:\n\n Define a registry with the metaclass or by inheriting :class:`Registry` ::\n\n from codestare.async_utils import RegistryMeta, Registry\n\n # virtually equivalent for most intents and purposes\n\n class Foo(metaclass=RegistryMeta):\n pass\n\n class Bar(Registry):\n pass\n\n See Also:\n :class:`RegistryMeta` -- more information about working with registry classes\n \"\"\"\n pass\n\n\ndef async_exit_on_exc(ctx_manager: typing.AsyncContextManager, task: asyncio.Task,\n loop: asyncio.BaseEventLoop = None) -> None:\n \"\"\"\n Schedules exit of the ``ctx_manager`` if the getting the task result raises an exception other than a\n :class:`asyncio.CancelledError`\n\n Args:\n ctx_manager: Some context manager that needs to be closed with exception info for exceptions raised\n by the ``task``\n\n task: a task that maybe succeeded or raised an exception\n loop: event loop to schedule the exit, uses current running loop if not provided -- `optional`\n\n \"\"\"\n\n loop = loop or asyncio.get_running_loop()\n try:\n 
task.result()\n except asyncio.CancelledError:\n pass\n except: # noqa\n exc_info = sys.exc_info()\n loop.call_soon(ctx_manager.__aexit__(*exc_info).__await__().__next__)\n\n\nclass awaitable_predicate:\n \"\"\"\n Typically, to let an ``async`` coroutine wait until some predicate is `True`, one uses a :class:`asyncio.Condition`.\n :meth:`Condition.wait_for(predicate) ` will block the coroutine until the ``predicate``\n returns `True` -- ``predicate`` will be reevaluated every time the condition\n :meth:`notifies ` waiting coroutines.\n\n An :class:`awaitable_predicate` object does exactly that, but it can also be evaluated to a boolean to make\n code more concise\n\n Example:\n\n >>> from codestare.async_utils import awaitable_predicate\n >>> value = 0\n >>> is_zero = awaitable_predicate(lambda: value == 0)\n >>> bool(is_zero)\n True\n >>> value = 1\n >>> bool(is_zero)\n False\n\n Or we can `wait` until the predicate is actually `True`\n\n >>> [...] # continued from above\n >>> async def set_value(number):\n ... global value\n ... async with is_zero.condition:\n ... value = number\n ... is_zero.condition.notify()\n ...\n >>> async def wait_for_zero():\n ... await is_zero\n ... print(f\"Finally! value: {value}\")\n ...\n >>> import asyncio\n >>> async def main():\n ... asyncio.create_task(wait_for_zero())\n ... for n in reversed(range(3)):\n ... await set_value(n)\n ...\n >>> asyncio.run(main())\n Finally! value: 0\n\n \"\"\"\n\n def __init__(self, predicate: typing.Callable[[], bool], condition: asyncio.Condition | None = None, timeout=None):\n self.condition = condition or asyncio.Condition()\n self.predicate = predicate\n self.waiting = None\n self.timeout = timeout\n\n async def _waiter(self):\n async with self.condition:\n await self.condition.wait_for(self.predicate)\n\n def __await__(self):\n if self.waiting is None:\n self.waiting = asyncio.create_task(self._waiter())\n\n return asyncio.wait_for(self.waiting, timeout=self.timeout).__await__()\n\n def __bool__(self):\n return self.predicate()","repo_name":"saggitar/codestare-async-utils","sub_path":"src/codestare/async_utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":8720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36675690914","text":"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torchvision.transforms as transforms\r\nimport time\r\nimport resnet\r\nimport utils\r\n\r\n\r\ndef train():\r\n # load data: dataset/train_kana/ includes 388,146 images\r\n trainFolder = \"D:/ImageLab/Camp/2019/alcon2019/dataset/train_kana/\"\r\n train_batch_size = 8\r\n\r\n transform = transforms.Compose( [ transforms.Resize( ( 224, 224 ) ), transforms.ToTensor(), transforms.Normalize( [ 0.5 ], [ 0.25 ] ) ] )\r\n # transform = transforms.Compose( [ transforms.Resize( ( 224, 224 ) ), transforms.ToTensor(), transforms.Normalize( ( 0.485, 0.456, 0.406 ), ( 0.229, 0.224, 0.225 ) ) ] )\r\n trainset = resnet.KanaDataset( trainFolder, transform )\r\n trainloader = torch.utils.data.DataLoader( trainset, batch_size=train_batch_size, shuffle=True, num_workers=2 )\r\n\r\n classes = trainset.getClasses()\r\n classNum = len( classes )\r\n # for i, c in enumerate( classes, 0 ):\r\n # print( \"%d %s %s\" % ( i, c, utils.unicode2kana( c ) ) )\r\n\r\n # use CUDA\r\n device = torch.device( \"cuda:0\" if torch.cuda.is_available() else \"cpu\" )\r\n print( device )\r\n\r\n # generate net\r\n net = resnet.Net( outputFeatures=classNum )\r\n net = 
net.to( device )\r\n\r\n # set parameters\r\n criterion = nn.CrossEntropyLoss() # nn.MSELoss()\r\n optimizer = optim.SGD( net.parameters(), lr=0.1, momentum=0.9 )\r\n\r\n # load parameters\r\n net.load_state_dict( torch.load( \"result/122_weight.pth\" ) )\r\n optimizer.load_state_dict( torch.load( \"result/122_optimizer.pth\" ) )\r\n\r\n # train\r\n starttime = time.time()\r\n print( \"Start Training\" )\r\n\r\n epochNum = 128\r\n for epoch in range( 122, epochNum ):\r\n runningloss = 0.0\r\n for i, data in enumerate( trainloader, 0 ):\r\n inputs, labels = data[ 0 ].to( device ), data[ 1 ].to( device )\r\n\r\n optimizer.zero_grad()\r\n outputs = net( inputs )\r\n\r\n loss = criterion( outputs, labels )\r\n loss.backward()\r\n optimizer.step()\r\n\r\n runningloss += loss.item()\r\n if i % 2000 == 1999:\r\n print( \"[%d, %5d] loss: %.3f\" % ( epoch + 1, i + 1, runningloss / 2000 ) )\r\n runningloss = 0.0\r\n\r\n torch.save( net.state_dict(), \"result/%s_weight.pth\" % str( epoch + 1 ).zfill( len( str( epochNum ) ) ) )\r\n torch.save( optimizer.state_dict(), \"result/%s_optimizer.pth\" % str( epoch + 1 ).zfill( len( str( epochNum ) ) ) )\r\n\r\n elapsed = time.time() - starttime\r\n print( \"Training Finished [%d sec]\" % elapsed )\r\n\r\n # save trained-model\r\n torch.save( net.state_dict(), \"result/weight.pth\" )\r\n torch.save( optimizer.state_dict(), \"result/optimizer.pth\" )\r\n\r\n return\r\n\r\n\r\ndef test():\r\n # load data\r\n testFolder = \"D:/ImageLab/Camp/2019/alcon2019/dataset/train_kana/\"\r\n test_batch_size = 8\r\n\r\n transform = transforms.Compose( [ transforms.Resize( ( 224, 224 ) ), transforms.ToTensor(), transforms.Normalize( [ 0.5 ], [ 0.25 ] ) ] )\r\n # transform = transforms.Compose( [ transforms.Resize( ( 224, 224 ) ), transforms.ToTensor(), transforms.Normalize( ( 0.485, 0.456, 0.406 ), ( 0.229, 0.224, 0.225 ) ) ] )\r\n testset = resnet.KanaDataset( testFolder, transform, isTest=True )\r\n testloader = torch.utils.data.DataLoader( testset, batch_size=test_batch_size, shuffle=False, num_workers=2 )\r\n\r\n classes = testset.getClasses()\r\n classNum = len( classes )\r\n # for i, c in enumerate( classes, 0 ):\r\n # print( \"%d %s %s\" % ( i, c, utils.unicode2kana( c ) ) )\r\n\r\n # use CUDA\r\n device = torch.device( \"cuda:0\" if torch.cuda.is_available() else \"cpu\" )\r\n print( device )\r\n\r\n # generate net\r\n net = resnet.Net( outputFeatures=classNum )\r\n net.load_state_dict( torch.load( \"result/48_weight.pth\" ) )\r\n net = net.to( device )\r\n\r\n # test using by train dataset\r\n starttime = time.time()\r\n print( \"Start Testing\" )\r\n\r\n all_correct = 0\r\n all_total = 0\r\n class_correct = list( 0. for i in range( classNum ) )\r\n class_total = list( 0. 
for i in range( classNum ) )\r\n with torch.no_grad():\r\n for data in testloader:\r\n images, labels = data\r\n images = images.to( device )\r\n labels = labels.to( device )\r\n outputs = net( images )\r\n _, predicted = torch.max( outputs, 1 )\r\n corrects = ( predicted == labels ) #.squeeze()\r\n\r\n all_correct += corrects.sum().item()\r\n all_total += labels.size( 0 )\r\n\r\n for i, label in enumerate( labels, 0 ):\r\n class_correct[ label ] += corrects[ i ].item()\r\n class_total[ label ] += 1\r\n\r\n elapsed = time.time() - starttime\r\n print( \"Testing Finished [%d sec]\" % elapsed )\r\n print( \"Accuracy of the network on the %d test images: %d %% ( %d / %d )\" % ( len( testset ), 100 * all_correct / all_total, all_correct, all_total ) )\r\n\r\n for i in range( classNum ):\r\n print( \"Accuracy of %5s(%s) : %2d %% ( %d / %d )\" % ( classes[ i ], utils.unicode2kana( classes[ i ] ), 100 * class_correct[ i ] / class_total[ i ], class_correct[ i ], class_total[ i ] ) )\r\n\r\n return\r\n\r\n\r\nif __name__ == \"__main__\":\r\n train()\r\n test()\r\n","repo_name":"hiSirius/kuzushimoji","sub_path":"src/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6412817008","text":"from sqlalchemy import Column, Integer, String, Boolean\nfrom config.Database import BaseModel\n\n\nclass Permission(BaseModel):\n __tablename__ = \"permission\"\n\n id = Column(Integer, primary_key=True, index=True, nullable=True)\n name = Column(String(255), unique=True, nullable=False)\n is_active = Column(Boolean, nullable=False, default=True)\n","repo_name":"BrannMojLop/template_fastapi","sub_path":"models/Permission.py","file_name":"Permission.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28699404966","text":"# I = 1\n# V = 5\n# X = 10\n# L = 50\n# C = 100\n# D = 500\n# M = 1000\n\n# input: s = \"III\"\n# output: 3\n# explanation: III = 3\n\n# input: s = \"LVIII\"\n# output: 58\n# explanation: L = 50, V = 5, III = 3\n\n# input: s = \"MCMXCIV\"\n# output: 1994\n# explanation: M = 1000, CM = 900, CX = 90 and IV = 4\n\nclass Solution:\n def romanToInt(self, s: str) -> int:\n rd = {\n \"I\" : 1,\n \"V\": 5,\n \"X\": 10,\n \"L\": 50,\n \"C\": 100,\n \"D\": 500,\n \"M\": 1000\n }\n\n n = len(s)\n rt = 0\n for i in range(n):\n if i == n - 1 or rd[s[i]] >= rd[s[i + 1]]:\n rt += rd[s[i]]\n else:\n rt -= rd[s[i]]\n return rt\n\n\ns = \"III\"\nob1 = Solution()\nprint(ob1.romanToInt(s))","repo_name":"johgee/blind-75","sub_path":"romantointeger.py","file_name":"romantointeger.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71201176586","text":"import psycopg2\nfrom shapely.geometry import LineString, Polygon, shape\nfrom shapely.ops import transform\nfrom shapely import wkb\nimport pyproj\nfrom functools import partial\nfrom shapely.geometry import Polygon, mapping\nfrom area import area\n\nconn = psycopg2.connect(port=5432, password=\"password\", host=\"192.168.99.100\", user=\"postgres\")\n\ncurs = conn.cursor()\n\n\ndef create_geojson(polygon):\n coordinates = [[]]\n\n for point in polygon:\n coordinates[0].append([point[0], point[1]])\n obj = {'type':'Polygon','coordinates':coordinates}\n return obj\n\ndef read_fences():\n fences = []\n areas = []\n with 
open(\"fences.txt\") as fp:\n line = fp.readline()\n while line:\n x = line.replace(\"(\", \"\")\n x = x.replace(\")\", \"\")\n cords = x.split(\",\")\n latlon = []\n for i in range(0, len(cords), 2):\n latlon.append((float(cords[i]), float(cords[i + 1])))\n areas.append(area(create_geojson(mapping(Polygon(latlon))[\"coordinates\"][0])) / 1000000)\n fences.append(Polygon(latlon))\n line = fp.readline()\n return fences, areas\n\nfences, areas = read_fences()\ncurs.execute('CREATE TEMP TABLE my_polygons(geom geometry, name text)')\nfor i in range(0, len(fences)):\n # Send it to PostGIS\n\n curs.execute(\n 'INSERT INTO my_polygons(geom, name)'\n 'VALUES (ST_SetSRID(%(geom)s::geometry, %(srid)s), %(name)s)',\n {'geom': fences[i].wkb_hex, 'srid': 4326, 'name': str(i)})\n\n conn.commit() # save data\n\n# Fetch the data from PostGIS, reading hex-encoded WKB into a Shapely geometry\ncurs.execute('SELECT name, geom FROM my_polygons')\nfor name, geom_wkb in curs:\n geom = wkb.loads(geom_wkb, hex=True)\n #print('{0}: {1}'.format(name, geom.wkt))\n s = shape(geom)\n proj = partial(pyproj.transform, pyproj.Proj(init='epsg:4326'),\n pyproj.Proj(init='epsg:3857'))\n\n print(transform(proj, s).area / 1000000)\n print(areas[int(name)])\n # First Line: LINESTRING Z (2.2 4.4 10.2, 3.3 5.5 8.4)\n","repo_name":"danieldimit/iosl-verified-locations-for-smart-contracts","sub_path":"oracle/hashing/visualization/postgis/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7629186414","text":"from __future__ import print_function\nimport sys\nsys.argv = [arg for arg in sys.argv if not arg.startswith(\"-psn_\")]\n\ntry:\n\timport argparse\n\targParser = argparse.ArgumentParser(add_help=False) # We add help manually.\nexcept ImportError:\n\tif len(sys.argv) > 1:\n\t\tprint(\"Warning, argparse (for parsing sys.argv) not available. This needs Python >=2.7. Ignoring sys.argv.\")\n\t# Dummy fallback\n\tclass DummyArgParser:\n\t\tdef add_argument(self, key, **kwargs):\n\t\t\tif key[0:2] == \"--\":\n\t\t\t\tsetattr(self, key[2:], False)\n\t\tdef parse_known_args(self, *args, **kwargs): return self, ()\n\t\tparse_args = parse_known_args\n\targParser = DummyArgParser()\n\n\nclass ArgAction_PrintHelp(argparse.Action):\n\tdef __call__(self, parser, namespace, values, option_string=None):\n\t\tparser.print_help()\n\t\t# We don't use parser.exit() because we overwrite that to not exit.\n\t\t# But we actually want to exit after this.\n\t\traise SystemExit\n\nargParser.add_argument(\n\t\"--help\", \"-h\", nargs=0, action=ArgAction_PrintHelp,\n\thelp=\"prints help/usage\"\n)\nargParser.add_argument(\n\t\"--shell\", action=\"store_true\",\n\thelp=\"uses a Python shell instead of the standard stdin control\"\n)\nargParser.add_argument(\n\t\"--pyshell\", action=\"store_true\", help=\"just a bare Python shell - no musicplayer stuff\"\n)\nargParser.add_argument(\n\t\"--pyexec\", nargs=1,\n\thelp=\"just executes the Python commands - no musicplayer stuff\",\n\tmetavar=\"\"\n)\nargParser.add_argument(\n\t\"--gui\", nargs=1,\n\thelp=\"use this GUI. 
possibilities: cocoa, qt, html, none\"\n)\nargParser.add_argument(\n\t\"--forkExecProc\", nargs=2, help=argparse.SUPPRESS\n)\nargParser.add_argument(\n\t# Used by MacOSX in some debug cases.\n\t\"-NSDocumentRevisionsDebugMode\", nargs=1, help=argparse.SUPPRESS\n)\nargParser.add_argument(\n\t# Used in MacOSX binary version.\n\t\"--nolog\", action=\"store_true\", help=argparse.SUPPRESS\n)\nargParser.add_argument(\n\t\"--debug\", action=\"store_true\", help=\"enable debugging (atm, just more debug output)\"\n)\nargParser.add_argument(\n\t\"--profile\", action=\"store_true\", help=\"enable profiling\"\n)\nargParser.add_argument(\n\t\"--nomodstartup\", action=\"store_true\", help=\"(debugging) don't load mods at startup\"\n)\nargParser.add_argument(\n\t\"--addsyspythonpaths\", action=\"store_true\", help=\"(debugging) add System Python paths\"\n)\n\n\nclass ArgParserExitException(Exception): pass\n\n# Patch to avoid exit on unknown args.\ndef argParser_exit(status=0, message=None):\n\tif message:\n\t\tmessage = message.strip()\n\t\tprint(\"arg parser:\", message)\n\t# Don't exit because we want to live!\n\t# Exceptions will be handled in appinfo.\n\traise ArgParserExitException(message)\nargParser.exit = argParser_exit\n\nignore = False\n","repo_name":"albertz/music-player","sub_path":"src/appinfo_args.py","file_name":"appinfo_args.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"81"} +{"seq_id":"74234843143","text":"import weaviate\n\nclient = weaviate.Client(\n url=\"http://localhost:8080\",\n)\n\nobject = {\n \"class\": \"Intermediate\",\n \"properties\": [\n {\n \"name\": \"text\",\n \"dataType\": [\"text\"],\n },\n {\n \"name\": \"start\",\n \"dataType\": [\"int\"],\n },\n {\n \"name\": \"end\",\n \"dataType\": [\"int\"],\n },\n {\n \"name\": \"order\",\n \"dataType\": [\"int\"]\n },\n {\n \"dataType\": [\"text\"],\n \"name\": \"userid\",\n },\n {\n \"dataType\": [\"boolean\"],\n \"name\": \"public\",\n },\n {\n \"dataType\": [\"boolean\"],\n \"name\": \"isResult\",\n },\n {\"name\": \"source\", \"dataType\": [\"Intermediate\"]},\n {\"name\": \"sourceFor\", \"dataType\": [\"Intermediate\"]},\n\n ],\n}\n\nclient.schema.create_class(object)\n","repo_name":"djrobinson/summa.ai","sub_path":"db/migrations/2_intermediate.py","file_name":"2_intermediate.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32328279527","text":"print('Enter input file number (e.g. 
for \"10.in\" enter: 10):')\nfileNum = str(input())\n\n# Read from .in file if exists\ntry:\n    inputFileName = './InputFiles/' + fileNum + '.in'\n    inputFile = open(inputFileName, \"r\")\n    print(\"Thanks, working...\")\nexcept FileNotFoundError:\n    print(\"This file does not exist: make sure the correct file number was provided.\")\n    exit()\n\n# Parse first line for N number of shifts\nN = int(inputFile.readline())\n\n# Parse for rows\nrows = inputFile.readlines()\nprint(\"Parsing completed...\")\n\n# Parse rows for shifts\nshifts = {}\npersonCtr = 1\n\nfor r in rows:\n    # Parse for shifts\n    shiftStart = int(r.split(\" \")[0])\n    shiftEnd = int(r.split(\" \")[1].split(\"\\n\")[0])\n    currShift = [shiftStart, shiftEnd]\n    shifts[personCtr] = currShift\n    personCtr += 1\nprint(\"Shifts recorded...\")\n\n\n# Record all covered hours without duplicates\nallHoursCovered = set({})\nhoursCoveredShifts = []\n\nfor person in shifts:\n    shift = shifts[person]\n\n    # Get all available hours to update all at once\n    actualHours = [t for t in range(shifts[person][0], shifts[person][1])]\n    allHoursCovered.update(actualHours)\n\n    allHoursCoveredLen = len(allHoursCovered)\n\n    # Calculate new covered hours after each new shift\n    if person > 1:\n        CoverageHoursContribution = allHoursCoveredLen - coveredHoursBefore\n\n        # Reinitialize previous element with current element\n        coveredHoursBefore = allHoursCoveredLen\n\n    # Calculate covered hours after first shift and initialize previous element to calculate current element later\n    else:\n        CoverageHoursContribution = coveredHoursBefore = allHoursCoveredLen\n\n    # Append contribution of each shift to actual coverage\n    hoursCoveredShifts.append(CoverageHoursContribution)\n\n# Subtract the smallest contribution: firing that safeguard loses the fewest covered hours, leaving the maximum coverage\nmaxAmount = str(len(allHoursCovered) - min(hoursCoveredShifts))\nprint(\"Calculations done...\")\n\n# Write the max amount into respective output files\noutputFileName = './OutputFiles/' + fileNum + '.out'\nwith open(str(outputFileName), \"w\") as f:\n    f.write(maxAmount)\n    f.close() \n\nprint(fileNum + \".out was successfully created: \\n\" + str(maxAmount))\n","repo_name":"jgelfman/MSSM-Summer-Preparation-Data-Structures-Assignment","sub_path":"GelfmanMysteriousSafeguards.py","file_name":"GelfmanMysteriousSafeguards.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4263824342","text":"from unittest import mock\n\nimport pytest\nfrom django.conf import settings\nfrom opensearch_dsl import Keyword, Mapping\n\nfrom datahub.search import opensearch as opensearch_client\n\n\n@mock.patch('datahub.search.opensearch.opensearch_bulk')\ndef test_bulk(opensearch_bulk, mock_opensearch_client):\n    \"\"\"Tests that bulk() passes the actions through to opensearch_bulk.\"\"\"\n    actions = []\n    chunk_size = 10\n    opensearch_client.bulk(actions=actions, chunk_size=chunk_size)\n\n    opensearch_bulk.assert_called_with(\n        mock_opensearch_client.return_value,\n        actions=actions,\n        chunk_size=chunk_size,\n        max_chunk_bytes=settings.OPENSEARCH_BULK_MAX_CHUNK_BYTES,\n    )\n\n\n@pytest.mark.parametrize('expected', (True, False))\ndef test_index_exists(mock_opensearch_client, expected):\n    \"\"\"Tests that `index_exists` returns True if the index exists, False otherwise.\"\"\"\n    index_name = 'test'\n\n    connection = mock_opensearch_client.return_value\n    connection.indices.exists.return_value = expected\n\n    assert opensearch_client.index_exists(index_name) == expected\n    
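    # Checking the return value alone is not enough -- the call itself is verified next:\n    # the helper should have asked the mocked client about exactly this index name.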
connection.indices.exists.assert_called_with(index_name)\n\n\n@mock.patch('datahub.search.opensearch.settings')\n@mock.patch('datahub.search.opensearch.connections')\ndef test_configure_connection(connections, settings):\n \"\"\"Test configuration of the connection.\"\"\"\n settings.OPENSEARCH_URL = 'https://login:password@test:1234'\n connections.configure.return_value = {}\n\n opensearch_client.configure_connection()\n\n connections.configure.assert_called_with(default={\n 'hosts': [settings.OPENSEARCH_URL],\n 'verify_certs': settings.OPENSEARCH_VERIFY_CERTS,\n })\n\n\ndef test_creates_index(monkeypatch, mock_connection_for_create_index):\n \"\"\"Test creates_index().\"\"\"\n monkeypatch.setattr(\n 'django.conf.settings.OPENSEARCH_INDEX_SETTINGS',\n {\n 'testsetting1': 'testval1',\n },\n )\n mapping = Mapping()\n mapping.field('test-field', Keyword())\n index = 'test-index'\n connection = mock_connection_for_create_index.return_value\n\n opensearch_client.create_index(index, mapping, alias_names=('alias1', 'alias2'))\n connection.indices.create.assert_called_once_with(\n index='test-index',\n body={\n 'settings': {\n 'testsetting1': 'testval1',\n 'analysis': {\n 'analyzer': {\n 'trigram_analyzer': {\n 'tokenizer': 'trigram',\n 'char_filter': ['special_chars'],\n 'filter': ['lowercase'],\n 'type': 'custom',\n },\n 'english_analyzer': {\n 'tokenizer': 'standard',\n 'filter': [\n 'english_possessive_stemmer',\n 'lowercase',\n 'english_stop',\n 'english_stemmer',\n ],\n 'type': 'custom',\n },\n },\n 'tokenizer': {\n 'trigram': {\n 'min_gram': 3,\n 'max_gram': 3,\n 'token_chars': ('letter', 'digit'),\n 'type': 'nGram',\n },\n },\n 'char_filter': {\n 'special_chars': {\n 'mappings': ('-=>',),\n 'type': 'mapping',\n },\n },\n 'filter': {\n 'english_possessive_stemmer': {\n 'language': 'possessive_english',\n 'type': 'stemmer',\n },\n 'english_stop': {\n 'stopwords': '_english_', 'type': 'stop',\n },\n 'english_stemmer': {\n 'language': 'english', 'type': 'stemmer',\n },\n },\n },\n },\n 'aliases': {\n 'alias1': {},\n 'alias2': {},\n },\n 'mappings': {\n 'properties': {\n 'test-field': {\n 'type': 'keyword',\n },\n },\n },\n },\n )\n\n\ndef test_delete_index(mock_opensearch_client):\n \"\"\"Test delete_index().\"\"\"\n index = 'test-index'\n client = mock_opensearch_client.return_value\n opensearch_client.delete_index(index)\n client.indices.delete.assert_called_once_with(index)\n\n\n@pytest.mark.parametrize(\n 'aliases,response,result',\n (\n (\n ('alias1',),\n {\n 'index1': {'aliases': {'alias1': {}}},\n },\n [{'index1'}],\n ),\n (\n ('alias2',),\n {\n 'index2': {'aliases': {'alias2': {}}},\n },\n [{'index2'}],\n ),\n (\n ('alias1', 'alias2'),\n {\n 'index1': {'aliases': {'alias1': {}}},\n 'index2': {'aliases': {'alias2': {}}},\n },\n [{'index1'}, {'index2'}],\n ),\n ),\n ids=['(alias1,)', '(alias1,alias2)', '(alias2,)'],\n)\ndef test_get_indices_for_aliases(mock_opensearch_client, aliases, response, result):\n \"\"\"Test get_indices_for_aliases().\"\"\"\n client = mock_opensearch_client.return_value\n client.indices.get_alias.return_value = response\n assert opensearch_client.get_indices_for_aliases(*aliases) == result\n\n\ndef test_get_aliases_for_index(mock_opensearch_client):\n \"\"\"Test get_aliases_for_index().\"\"\"\n index = 'test-index'\n client = mock_opensearch_client.return_value\n client.indices.get_alias.return_value = {\n index: {\n 'aliases': {\n 'alias1': {},\n 'alias2': {},\n },\n },\n }\n assert opensearch_client.get_aliases_for_index(index) == {'alias1', 'alias2'}\n 
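# added note (illustrative): opensearch-py's indices.get_alias accepts the index as a keyword argument, which the assertion below relies on.\n 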
client.indices.get_alias.assert_called_with(index=index)\n\n\n@pytest.mark.parametrize('expected', (True, False))\ndef test_alias_exists(mock_opensearch_client, expected):\n \"\"\"Test alias_exists().\"\"\"\n index_name = 'test-index'\n\n client = mock_opensearch_client.return_value\n client.indices.exists_alias.return_value = expected\n\n assert opensearch_client.alias_exists(index_name) == expected\n client.indices.exists_alias.assert_called_with(name=index_name)\n\n\n@pytest.mark.parametrize(\n 'add_actions,remove_actions,expected_body',\n (\n (\n (\n (),\n (\n ('test-alias', ('index1', 'index2')),\n ),\n {\n 'actions': [{\n 'remove': {\n 'alias': 'test-alias',\n 'indices': ['index1', 'index2'],\n },\n }],\n },\n ),\n (\n (\n ('test-alias', ('index1', 'index2')),\n ),\n (),\n {\n 'actions': [{\n 'add': {\n 'alias': 'test-alias',\n 'indices': ['index1', 'index2'],\n },\n }],\n },\n ),\n (\n (\n ('test-alias', ('index1', 'index2')),\n ),\n (\n ('test-alias-2', ('index3', 'index4')),\n ),\n {\n 'actions': [\n {\n 'add': {\n 'alias': 'test-alias',\n 'indices': ['index1', 'index2'],\n },\n },\n {\n 'remove': {\n 'alias': 'test-alias-2',\n 'indices': ['index3', 'index4'],\n },\n },\n ],\n },\n ),\n )\n ),\n)\ndef test_update_alias(mock_opensearch_client, add_actions, remove_actions, expected_body):\n \"\"\"Test get_aliases_for_index().\"\"\"\n client = mock_opensearch_client.return_value\n with opensearch_client.start_alias_transaction() as alias_transaction:\n for action in add_actions:\n alias_transaction.associate_indices_with_alias(action[0], action[1])\n for action in remove_actions:\n alias_transaction.dissociate_indices_from_alias(action[0], action[1])\n client.indices.update_aliases.assert_called_with(body=expected_body)\n\n\ndef test_create_alias(mock_opensearch_client):\n \"\"\"Test create_alias().\"\"\"\n index_name = 'test-index'\n alias_name = 'test-alias'\n\n client = mock_opensearch_client.return_value\n\n opensearch_client.associate_index_with_alias(alias_name, index_name)\n client.indices.put_alias.assert_called_with(index_name, alias_name)\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/search/test/test_opensearch.py","file_name":"test_opensearch.py","file_ext":"py","file_size_in_byte":9313,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"24886698582","text":"import urllib.request, urllib.parse, urllib.error\nimport json\nimport ssl\nimport time as tm\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import DateFormatter\nimport matplotlib.ticker as ticker\n\n# covid data url\nurl = 'https://opendata.ecdc.europa.eu/covid19/casedistribution/json'\n\n# print('Retrieving', url)\nuh = urllib.request.urlopen(url)\ndata = uh.read().decode()\n# print('Retrieved', len(data), 'characters')\n\n# create dataframe from url data\njs = json.loads(data)\nlst = list(js['records'])\ndf = pd.DataFrame.from_dict(lst)\n\n# ensure we have the correct data types and set index\ndf['dateRep'] = pd.to_datetime(df['dateRep'], dayfirst = True)\ndf['day'] = pd.to_numeric(df['day'])\ndf['month'] = pd.to_numeric(df['month'])\ndf['year'] = pd.to_numeric(df['year'])\ndf['cases'] = pd.to_numeric(df['cases'])\ndf['deaths'] = pd.to_numeric(df['deaths'])\ndf['popData2028'] = pd.to_numeric(df['popData2018'])\n\n# # Restructure data\n\ndf = df.pivot_table(index = 'dateRep', columns = 'continentExp', values = 'deaths', aggfunc=np.sum).cumsum()\ncovid = 
df.reset_index('dateRep')\ncovid.set_index(['dateRep'], inplace=True)\n\n# Creating the Visualization\nplot = covid.plot(figsize=(9,6), linewidth=4, legend=True)\nplot.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))\nplot.grid(color='#d4d4d4')\nplot.set_xlabel('Date')\nplot.set_ylabel('# of Cases')\n\n\n# Adding Labels\nplot.text(x = covid.index[1], y = int(covid.max().max())+50000, s = \"COVID-19 Deaths by Continent\", fontsize = 16, weight = 'bold', alpha = .75)\nplt.subplots_adjust(left = 0.16, bottom = 0.12, right = 0.86)\nplt.show()","repo_name":"mattcockerill/covid-19","sub_path":"cases_per_continent.py","file_name":"cases_per_continent.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7616048626","text":"print('\\033[1;33m##### DESAFIO 77 #####\\033[m\\n')\nwhile True:\n word = str(input('\\nDigite uma palavra para ver as vogais: '))\n print(f'\\nNa palavra {word} temos as vogais: ', end='') \n for c in word:\n for f in range (len(c)):\n if c[f].lower() in 'aeiou':\n print(f'{c[f]} ', end='')\n esc = 'a'\n while esc not in 'SN':\n esc = str(input('\\nDeseja escrever outra palavra? [S/N] ')).strip().upper()\n if esc == 'N':\n break\nprint('\\nObrigado!\\n')\n","repo_name":"ismael211/Curso-Python","sub_path":"Python - Curso em Video 1, 2 ,3/pt077.py","file_name":"pt077.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40855030936","text":"# AULA 20 - FUNÇÕES\n\n'''\ndef titulo(txt):\n print('-' * 30)\n print(txt)\n print('-' * 30)\n\n\ntitulo(' CURSO EM VÍDEO ')\ntitulo(' APRENDA PYTHON ')\ntitulo(' THALES TERRA ')\n'''\n\n\n#def soma(a, b):\n# print(f'A = {a}, B = {b}')\n# s = a + b\n# print(f'A soma A + B = {s}')\n\n\n#soma(2, 1)\n\n\ndef contador(* num):\n tam = len(num)\n print(f'Recebi os valores {num} e são ao todo {tam} números.')\n s = sum(num)\n print(f'A soma: {s}')\n\n\ncontador(2, 1, 7)\ncontador(8, 0)\ncontador(4, 4, 7, 6, 2)\n\nprint('\\n\\n')\n\n\ndef dobra(lst):\n pos = 0\n while pos < len(lst):\n lst[pos] *= 2\n pos += 1\n\n\nvalores = [6, 3, 9, 1, 0, 2]\ndobra(valores)\nprint(valores)\n\n\ndef soma(* valores):\n s = 0\n for num in valores:\n s += num\n print(f'Somando os valores {valores} temos {s}')\n\n\nsoma(5, 2)\nsoma(2, 9, 4)\n","repo_name":"nbthales/cev-python","sub_path":"aula20.py","file_name":"aula20.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10549899642","text":"from openerp.tests import common\n\n\nclass TestChanges(common.TransactionCase):\n\n def setup(self):\n\n \"\"\"***setup change tests***\"\"\"\n super(TestChanges, self).setup()\n cr, uid, = self.cr, self.uid\n\n self.change_model = self.registry('change.management.change')\n self.project_model = self.registry('project.project')\n self.task_model = self.registry('project.task')\n self.user_model = self.registry('res.users')\n\n self.test_project_id = self.project_model.create(\n cr, uid, {'name': 'ChangeTestProject'}\n )\n self.change_owner_id = self.user_model.search(\n cr, uid, [('name', '=', 'Demo User')])[0]\n self.change_author_id = self.user_model.search(\n cr, uid, [('name', '=', 'Change User')])[0]\n self.change_second_author_id = self.user_model.search(\n cr, uid, [('login', '=', 'changemanager')])[0]\n\n self.test_change_id = self.change_model.create(\n 
cr, uid, {\n 'name': 'ChangeTest0001',\n 'description': 'TestChange_SkyPaintBlue',\n 'change_category_id': 1,\n 'project_id': self.test_project_id,\n 'author_id': self.change_author_id,\n 'change_owner_id': self.change_owner_id\n }\n )\n\n def test_change_owner_and_creator_added_to_followers_for_change(self):\n cr, uid = self.cr, self.uid\n change = self.change_model.browse(cr, uid, self.test_change_id)\n followers = [follower.name for follower in change.message_follower_ids]\n self.assertTrue(\n len(followers) == 3, msg='Expecting 3 followers - got:%s' % len(\n followers\n )\n )\n\n self.assertTrue(\n change.author_id.name in followers,\n msg='Change Author NOT in followers:%s' % change.author_id.name\n )\n self.assertTrue(\n change.change_owner_id.name in followers,\n msg='''\n Change Owner NOT in followers:%s\n ''' % change.change_owner_id.name\n )\n self.assertEqual(\n 'ChangeTest0001', change.name, msg='Change name incorrect'\n )\n\n def test_saving_a_change_in_users_as_followers_works(self):\n cr, uid = self.cr, self.uid\n self.change_model.write(\n cr, uid, [self.test_change_id],\n {'author_id': self.change_second_author_id}\n )\n change = self.change_model.browse(cr, uid, self.test_change_id)\n followers = [follower.name for follower in change.message_follower_ids]\n self.assertTrue(\n len(followers) == 4,\n msg='Expecting 4 followers - got:%s' % len(followers)\n )\n self.assertTrue(\n change.author_id.name in followers,\n msg='Change Author NOT in followers:%s' % change.change_id.name\n )\n\n def test_adding_a_task_on_a_change(self):\n cr, uid = self.cr, self.uid\n change = self.change_model.read(\n cr, uid, self.test_change_id, ['message_follower_ids']\n )\n followers = change['message_follower_ids']\n self.change_model.write(\n cr, uid, [self.test_change_id],\n {'change_response_ids': [\n [0, False,\n {'remaining_hours': 0,\n 'priority': '2',\n 'stage_id': 1,\n 'planned_hours': 0,\n 'user_id': uid,\n 'name': 'My New Task',\n 'date_deadline': False,\n 'sequence': 10,\n 'date_end': False,\n 'date_start': False,\n 'child_ids': [[6, False, []]],\n 'company_id': 1,\n 'work_ids': [],\n 'parent_ids': [[6, False, []]],\n 'message_follower_ids': followers,\n 'categ_ids': [[6, False, []]],\n 'project_id': 1,\n 'partner_id': False,\n 'message_ids': False,\n 'description': 'A new Task'\n }\n ]\n ]}\n )\n\n task_ids = self.change_model.read(\n cr, uid, self.test_change_id, ['change_response_ids']\n )\n tasks = self.task_model.read(\n cr, uid, task_ids['change_response_ids'], ['message_follower_ids']\n )\n for task in tasks:\n self.assertEqual(\n followers,\n task['message_follower_ids'],\n msg='Followers are not set on the associated action'\n )\n","repo_name":"sysadminmatmoz/change_management","sub_path":"change_management/tests/test_changes.py","file_name":"test_changes.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31206097600","text":"import tensorflow as tf\nfrom tensorflow.keras import models, layers\n\n\nclass DownScalingBlock(layers.Layer):\n def __init__(self, depth, kernel_size=3, **kwargs):\n super(DownScalingBlock, self).__init__(**kwargs)\n self.depth = depth\n self.kernel_size = kernel_size\n\n def build(self, input_shape):\n self.conv1 = layers.Conv2D(self.depth,\n self.kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n activation='relu')\n self.conv2 = layers.Conv2D(self.depth,\n self.kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n 
activation='relu')\n self.pool = layers.MaxPool2D()\n super(DownScalingBlock, self).build(input_shape)\n\n def call(self, inputs):\n x = self.conv1(inputs)\n x_concat = self.conv2(x)\n x = self.pool(x_concat)\n return x, x_concat\n\n def get_config(self):\n config = super(DownScalingBlock, self).get_config()\n config.update({\n 'depth': self.depth,\n 'kernel_size': self.kernel_size\n })\n return config\n\n\nclass UpScalingBlock(layers.Layer):\n def __init__(self, depth, kernel_size=3, **kwargs):\n super(UpScalingBlock, self).__init__(**kwargs)\n self.depth = depth\n self.kernel_size = kernel_size\n\n def build(self, input_shape):\n self.conv1 = layers.Conv2D(self.depth,\n self.kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n activation='relu')\n self.conv2 = layers.Conv2D(self.depth,\n self.kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n activation='relu')\n self.conv3 = layers.Conv2D(self.depth // 2,\n self.kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n activation='relu')\n self.upsampling = layers.UpSampling2D()\n super(UpScalingBlock, self).build(input_shape)\n\n def call(self, inputs):\n x = self.conv1(inputs)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.upsampling(x)\n return x\n\n def get_config(self):\n config = super(UpScalingBlock, self).get_config()\n config.update({\n 'depth': self.depth,\n 'kernel_size': self.kernel_size\n })\n return config\n\n\nclass Unet(models.Model):\n def __init__(self):\n super(Unet, self).__init__()\n\n def build(self, input_shape):\n self.downscale1 = DownScalingBlock(64)\n self.downscale2 = DownScalingBlock(128)\n self.downscale3 = DownScalingBlock(256)\n self.upscale1 = UpScalingBlock(512)\n self.upscale2 = UpScalingBlock(256)\n self.upscale3 = UpScalingBlock(128)\n self.conv1 = layers.Conv2D(64, 3,\n padding='same',\n kernel_initializer='he_normal',\n activation='relu')\n self.conv2 = layers.Conv2D(64, 3,\n padding='same',\n kernel_initializer='he_normal',\n activation='relu')\n self.conv3 = layers.Conv2D(3, 3, \n padding='same', \n kernel_initializer='he_normal')\n self.shape = input_shape\n super(Unet, self).build(input_shape)\n\n def call(self, inputs):\n x, c1 = self.downscale1(inputs)\n x, c2 = self.downscale2(x)\n x, c3 = self.downscale3(x)\n x = layers.concatenate([self.upscale1(x), c3])\n x = layers.concatenate([self.upscale2(x), c2])\n x = layers.concatenate([self.upscale3(x), c1])\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n return x\n\n def summary(self):\n inputs = layers.Input(shape=self.shape[1:])\n outputs = self.call(inputs)\n model = tf.keras.Model(inputs, outputs)\n model.summary()\n","repo_name":"atseptember1/dog-cat-segmentation","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10666511315","text":"import cv2\nimport numpy as np\nimport pdb\nfrom Warping import Warping\nimport math\nimport config\nfrom skimage.exposure import match_histograms\nclass LayerDecomposition:\n def layer_decomposition(self, image_bgr):\n this_image_lab = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2Lab)\n this_image_lab = np.float32(this_image_lab)\n structure_layer = cv2.bilateralFilter(this_image_lab[:,:,0],8,25,25)\n detail_layer = this_image_lab[:,:,0] - structure_layer\n color_layer = this_image_lab[:, :, 1:3]\n layersImage = {\n \"structure_layer\":structure_layer,\n \"detail_layer\": detail_layer, \n \"color_layer\":color_layer\n 
}\n return layersImage\n def layer_recomposition(self,dict_layers):\n final_image = np.zeros((dict_layers['structure_layer'].shape[0] , dict_layers['structure_layer'].shape[1],3),np.uint8)\n final_image[:,:,0] = dict_layers['detail_layer'] + dict_layers['structure_layer']\n final_image[:,:,1:3] = dict_layers['color_layer']\n final_image = cv2.cvtColor(np.uint8( final_image), cv2.COLOR_Lab2BGR)\n return final_image\n\n def apply_layer_highlights_transfer(self,dict_layers_example,dict_layers_subject):\n # layersImage = {\n # \"structure_layer\":structure_layer,\n # \"detail_layer\": detail_layer, \n # \"color_layer\":color_layer\n # }\n # pdb.set_trace()\n # grad_Es = cv2.Sobel(dict_layers_example['structure_layer'],cv2.CV_32F ,0,1,ksize=5) + cv2.Sobel(dict_layers_example['structure_layer'],cv2.CV_32F ,1,0,ksize=5) # change this to gradient + laplacian\n # grad_Is = cv2.Sobel(dict_layers_subject['structure_layer'],cv2.CV_32F, 0,1,ksize=5) + cv2.Sobel(dict_layers_subject['structure_layer'],cv2.CV_32F, 1,0,ksize=5)\n grad_Es = cv2.Laplacian(dict_layers_example['structure_layer'],cv2.CV_32F )\n grad_Is = cv2.Laplacian(dict_layers_subject['structure_layer'],cv2.CV_32F)\n gauss_Is = cv2.GaussianBlur(dict_layers_subject['structure_layer'], (5,5), 1.5 )\n size = grad_Is.shape\n beta = self.dest_landmarkGenerator.masks_for_beta['others']\n grad_Rs = np.zeros(grad_Is.shape)\n for i in range(size[0]):\n for j in range(size[1]):\n if(abs(grad_Es[i, j])*beta[i, j] > abs(grad_Is[i, j])):\n grad_Rs[i, j] = grad_Es[i, j] \n else:\n grad_Rs[i, j] = grad_Is[i, j]\n R_struct = grad_Rs + gauss_Is\n return R_struct\n\n def other_layer_transfers(self,warp_obj):\n # Layer Decomposition\n layers_example_img = self.layer_decomposition(warp_obj.transfered_image)\n layers_destination_img = self.layer_decomposition(warp_obj.orig_image) \n \n \n for k,this_lyr in layers_example_img.items():\n if this_lyr.shape[-1] !=2:\n # pdb.set_trace()\n cv2.imwrite(self.dest_landmarkGenerator.output_dir+\"eg_\"+k+\".jpg\",this_lyr.astype(np.uint8))\n else:\n cv2.imwrite(self.dest_landmarkGenerator.output_dir+\"eg_0\"+k+\".jpg\",this_lyr[:,:,0].astype(np.uint8))\n cv2.imwrite(self.dest_landmarkGenerator.output_dir+\"eg_1\"+k+\".jpg\",this_lyr[:,:,1].astype(np.uint8))\n for k,this_lyr in layers_destination_img.items():\n if this_lyr.shape[-1] !=2:\n cv2.imwrite(self.dest_landmarkGenerator.output_dir+\"dest_\"+k+\".jpg\",this_lyr.astype(np.uint8))\n else:\n cv2.imwrite(self.dest_landmarkGenerator.output_dir+\"dest_0\"+k+\".jpg\",this_lyr[:,:,0].astype(np.uint8))\n cv2.imwrite(self.dest_landmarkGenerator.output_dir+\"dest_1\"+k+\".jpg\",this_lyr[:,:,1].astype(np.uint8))\n p = [layers_example_img,layers_destination_img]\n \n # Skin Detail Transfer\n sd_eg_wt = 0.8\n sd_src_wt = 0.2\n other_skin_detail_final = sd_eg_wt * layers_example_img['detail_layer'] + sd_src_wt * layers_destination_img['detail_layer']\n \n # Skin Color Transfer\n lambda_color = self.lambda_color\n other_skin_color_final = lambda_color * layers_example_img['color_layer'] + (1-lambda_color) * layers_destination_img['color_layer']\n\n # Highlight and Shading Transfer #CAUTION\n Rs_others = layers_destination_img['structure_layer']\n # Rs_others = self.apply_layer_highlights_transfer(layers_example_img,layers_destination_img)\n \n R_layers = {\n \"structure_layer\":Rs_others,\n \"detail_layer\": other_skin_detail_final, \n \"color_layer\":other_skin_color_final\n }\n\n return self.layer_recomposition(R_layers)\n \n # def gaussian_np_mat():\n\n\n def get_Gaussian(x, a , 
c_sq ):\n return a*np.exp(- np.square(x)/(2*c_sq))\n \n def get_Gaussian_num(x, a , c_sq ):\n return a*math.exp(- (x**2)/(2*c_sq))\n\n def get_euclid(p, q):\n ex = float(p[0]-q[0])\n ey = float(p[1]-q[1])\n return math.sqrt( 1.*ex*ex + 1.*ey*ey )\n def lip_makeup_transfer2(self):\n print(\"applying makeup transfer\")\n orig_image = self.warp_obj.orig_image\n transfered_image = self.warp_obj.transfered_image\n mask_lips = self.warp_obj.bitmasks['lips']\n # fetch only lips area\n lips_landmark_points = self.dest_landmarkGenerator.landmarks_dict['lips']\n (x,y,w,h) = Warping.get_bounding_rect(lips_landmark_points)\n orig_image_lips = orig_image[y:y+h, x:x+w,:].copy()\n transfered_image_lips = transfered_image[y:y+h, x:x+w,:].copy()\n mask_lips_cropped = mask_lips[y:y+h, x:x+w].copy()\n # LAB Conversion\n orig_image_lips_lab = cv2.cvtColor(orig_image_lips, cv2.COLOR_BGR2Lab)\n transfered_image_lips_lab = cv2.cvtColor(transfered_image_lips, cv2.COLOR_BGR2Lab)\n\n orig_l = orig_image_lips_lab[:,:,0]\n trans_l = transfered_image_lips_lab[:,:,0]\n\n # histogram equalization\n orig_image_lips_eq = cv2.equalizeHist(orig_l)\n transfered_image_lips_eq = cv2.equalizeHist(trans_l)\n \n\n lips_Result = orig_image_lips_eq.copy()\n lips_Result = np.zeros_like(orig_image_lips_lab)\n c_sq = config.gaussian_lips_c_sq\n a = config.gaussian_lips_a\n dist_window = 5\n for p_i in range(lips_Result.shape[0]):\n for p_j in range(lips_Result.shape[1]):\n if mask_lips_cropped[p_i][p_j]==0:\n continue\n Ip = float(orig_image_lips_eq[p_i][p_j])\n best_val = -1\n for q_i in range(max(0,p_i-dist_window),min( lips_Result.shape[0] , p_i+dist_window)):\n for q_j in range(max(0,p_j-dist_window),min( lips_Result.shape[1] , p_j+dist_window)):\n if mask_lips_cropped[q_i][q_j]==0:\n continue\n Eq = float(transfered_image_lips_eq[q_i][q_j])\n dist_pixels = LayerDecomposition.get_euclid( (p_i,p_j), (q_j,q_j))\n G_d = LayerDecomposition.get_Gaussian_num( dist_pixels/math.sqrt(2*dist_window) , a , c_sq )\n # print('deq',(Eq-Ip)/255.)\n G_l = LayerDecomposition.get_Gaussian_num((Eq-Ip)/255. , a , c_sq )\n this_val = (G_d*G_l)\n if best_val_collection\"\nprint (u1.address_collection)\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n\n","repo_name":"mattssll/python_dump","sub_path":"flask/flask_orm_map_existingtables/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74195787465","text":"import logging\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass DataQualityOperator(BaseOperator):\n\n ui_color = '#89DA59'\n\n @apply_defaults\n def __init__(\n self,\n tables: list, \n redshift_conn_id = \"redshift\",\n *args, **kwargs):\n\n super(DataQualityOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.tables = tables\n\n def execute(self, context):\n for table in self.tables:\n self.check_greater_than_zero(table)\n self.check_no_null_values(table)\n\n \n def check_greater_than_zero(self, table):\n redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n records = redshift_hook.get_records(f\"select count(*) from {table}\")\n \n if len(records) < 1 or len(records[0]) < 1:\n raise ValueError(f\"Data quality check failed. 
{table} returned no results\")\n \n num_records = records[0][0]\n if num_records < 1:\n raise ValueError(f\"Data quality check failed. {table} contained 0 rows\")\n\n logging.info(f\"**Table {table} passed data quality check: not an empty table.\")\n \n \n def check_no_null_values(self, table):\n redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n query_result = redshift_hook.get_records(f\"\"\"\n select column_name\n from information_schema.columns \n where table_name = '{table}'\n and is_nullable = 'NO'\n \"\"\")\n non_nullable_columns = [i[0] for i in query_result]\n\n logging.info(f\"Table {table} has non_nullable columns: {non_nullable_columns}\")\n\n null_records = {}\n for column in non_nullable_columns:\n query_result = redshift_hook.get_records(f\"select count(*) - count({column}) from {table}\")\n num_nulls = query_result[0][0]\n if num_nulls > 0:\n null_records[column] = num_nulls\n \n if len(null_records) > 0:\n msg = f\"Data quality check failed. {table} contains null values.\\n\"\n for col, count in null_records.items():\n msg += f\"{col}: {count}\\n\"\n raise ValueError(msg)\n\n logging.info(f\"**Table {table} passed data quality check: no null.\")","repo_name":"kevinkevin556/Sparkify-Airflow","sub_path":"plugins/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71935813704","text":"import cv2\r\nimport os \r\nimport time\r\n\r\nimport numpy as np\r\n\r\nfrom .utils import get_config, loadImage, sorting_bounding_box, visual, align_item, tlwh_2_maxmin, merge_box\r\n\r\nfrom .libs.CRAFT.craft import CRAFT\r\nfrom .libs.MORAN.MORAN_pred import MORAN_predict\r\nfrom .libs.MORAN.models.moran import MORAN\r\nfrom .libs.DeepText.Deeptext_pred import Deeptext_predict, load_model_Deeptext\r\n# from .libs.detectron2.predict_img import predict_img_detectron2\r\n# from .libs.detectron2.predict_img import visualize\r\nfrom .libs.super_resolution.improve_resolution import improve_resolution\r\nfrom .libs.CRAFT.craft_predict import craft_text_detect, load_model_Craft\r\n\r\nfrom .src import yolo_detect\r\n\r\n# setup config\r\ncfg = get_config()\r\ncfg.merge_from_file('./app/LP_reg_src/configs/pipeline.yaml')\r\ncfg.merge_from_file('./app/LP_reg_src/configs/craft.yaml')\r\ncfg.merge_from_file('./app/LP_reg_src/configs/faster.yaml')\r\ncfg.merge_from_file('./app/LP_reg_src/configs/yolo.yaml')\r\n\r\n\r\nCRAFT_CONFIG = cfg.CRAFT\r\nNET_CRAFT = CRAFT()\r\nPIPELINE_CFG = cfg.PIPELINE\r\n\r\n# load all model\r\n# model yolo\r\nprint ('[LOADING] Detect model')\r\nYOLO_NET = cv2.dnn.readNet(cfg.YOLOV4.YOLO_MODEL_PATH, cfg.YOLOV4.YOLO_CFG_PATH)\r\nprint ('[LOADING SUCESS] Detect model')\r\n# model text detct\r\nprint ('[LOADING] Text detecttion model')\r\nCRAFT_MODEL = load_model_Craft(CRAFT_CONFIG, NET_CRAFT)\r\nprint ('[LOADING SUCESS] Text detection model')\r\n# model regconition\r\nprint ('[LOADING] Text regconition model')\r\nDEEPTEXT_MODEL, DEEPTEXT_PREDICTION, DEEPTEXT_CONVERTER = load_model_Deeptext(cfg.PIPELINE.DEEPTEXT_MODEL_PATH)\r\nprint ('[LOADING SUCESS] Text regconition model')\r\n\r\ndef text_recog(cfg, image_path, model, Prediction, converter):\r\n text = 'None'\r\n if cfg.PIPELINE.DEEPTEXT:\r\n list_image_path = [image_path]\r\n for img in list_image_path:\r\n text = Deeptext_predict(img, model, Prediction, converter)\r\n elif cfg.PIPELINE.MORAN:\r\n text = MORAN_predict(cfg.PIPELINE.MORAN_MODEL_PATH, 
image_path, MORAN)\r\n return text\r\n\r\ndef text_detect_CRAFT(img, craft_config, CRAFT_MODEL, sortbb=True, visual_img=False):\r\n # img = loadImage(image_path)\r\n bboxes, polys, score_text = craft_text_detect(img, craft_config, CRAFT_MODEL)\r\n\r\n if sortbb:\r\n polys = sorting_bounding_box(polys)\r\n if visual_img:\r\n img = visual(img, polys)\r\n\r\n return bboxes, polys, score_text\r\n\r\ndef LP_detect_faster(img, cfg):\r\n classes = ['LP']\r\n outputs = predict_img_detectron2(cfg.FASTER_RCNN.MODEL, cfg.FASTER_RCNN.CONFIG, cfg.FASTER_RCNN.CONFIDENCE_THRESHOLD, cfg.FASTER_RCNN.NUM_OF_CLASS, img)\r\n frame = visualize (outputs, img, classes)\r\n cv2.imwrite('frame.jpg', frame)\r\n boxes = outputs['instances'].pred_boxes\r\n scores = outputs['instances'].scores\r\n classes = outputs['instances'].pred_classes\r\n return boxes\r\n\r\n\r\ndef LP_detect_yolo(img, cfg, YOLO_NET):\r\n img, class_ids, boxes = yolo_detect(img, YOLO_NET, cfg)\r\n return boxes\r\n\r\n\r\ndef LP_regconition(cfg, img, YOLO_NET):\r\n \r\n # detect License plates in image \r\n detected_LP = LP_detect_yolo(img, cfg, YOLO_NET)\r\n for i in detected_LP:\r\n # store the license plate in image to new_img variable\r\n print (\"detected license plates: \", i)\r\n new_img = img[int(i[1]):int(i[3]), int(i[0]):int(i[2])]\r\n cv2.imwrite('./result/LP.jpg', new_img)\r\n\r\n # predict region of text bounding box\r\n bboxes, polys, score_text = text_detect_CRAFT(new_img, CRAFT_CONFIG, CRAFT_MODEL)\r\n LP_reg = []\r\n\r\n for index, bbox in enumerate(bboxes):\r\n # merge bbox on a line\r\n img_reg = new_img[int(bbox[0][1]):int(bbox[2][1]), int(bbox[0][0]):int(bbox[2][0])]\r\n img_reg = improve_resolution(img_reg)\r\n cv2.imwrite('./LP_reg_src/reg/img_reg.jpg', img_reg)\r\n text = text_recog (cfg, './LP_reg_src/reg/img_reg.jpg', DEEPTEXT_MODEL, DEEPTEXT_PREDICTION, DEEPTEXT_CONVERTER)\r\n LP_reg.append(text)\r\n cv2.rectangle(new_img, (bbox[0][0], bbox[0][1]), (bbox[2][0], bbox[2][1]), (0,255,0), 1)\r\n # cv2.putText(new_img, str(count), (bbox[0][0], bbox[0][1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(255,0,0), thickness=1)\r\n LP_reg_text = ''.join(LP_reg)\r\n LP_reg_text = LP_reg_text.upper()\r\n cv2.putText(img, str(LP_reg_text), (int(i[0]), int(i[1])), cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(255,255,0), thickness=3)\r\n return img\r\n\r\nif __name__ == '__main__':\r\n # start = time.time()\r\n # path = './data/reg_data'\r\n # save = './result_text_detect/'\r\n # detect_on_image(cfg, path)\r\n # for i in os.listdir(path):\r\n # path_save = os.path.join(save, i)\r\n # img_path = os.path.join(path, i)\r\n # print (path_save)\r\n # img = cv2.imread(img_path)\r\n # bboxes, polys, score_text = text_detect_CRAFT(img, CRAFT_CONFIG, NET_CRAFT, PIPELINE_CFG.Y_DIST_FOR_MERGE_BBOX, PIPELINE_CFG.EXPAND_FOR_BBOX)\r\n # for i in bboxes:\r\n # cv2.rectangle(img, (int (i[0][0]), int(i[0][1])), (int (i[2][0]), int(i[2][1])), (0,255,255), 1)\r\n # cv2.imwrite(path_save, img)\r\n\r\n img = cv2.imread('./data/LP_8539.jpg')\r\n start = time.time()\r\n img = LP_regconition(cfg, img, YOLO_NET)\r\n end = time.time()\r\n print (np.abs (start - end))\r\n cv2.imwrite('./result/8539.jpg', img)\r\n\r\n # img = cv2.imread('data/check.png')\r\n # bboxes, polys, score_text = text_detect_CRAFT(img, CRAFT_CONFIG, NET_CRAFT, PIPELINE_CFG.Y_DIST_FOR_MERGE_BBOX, PIPELINE_CFG.EXPAND_FOR_BBOX)\r\n # for i in bboxes:\r\n # cv2.rectangle(img, (int (i[0][0]), int(i[0][1])), (int (i[2][0]), int(i[2][1])), (255,0,0), 2)\r\n # cv2.imwrite('Khang.jpg', 
img)\r\n","repo_name":"Thanh-Hoo/Web_demo_license_plate_regconition","sub_path":"app/LP_reg_src/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13019693724","text":"\"\"\"\n[N] [G] [Q] \n[H] [B] [B] [R] [H] \n[S] [N] [Q] [M] [T] [Z] \n[J] [T] [R] [V] [H] [R] [S]\n[F] [Q] [W] [T] [V] [J] [V] [M]\n[W] [P] [V] [S] [F] [B] [Q] [J] [H]\n[T] [R] [Q] [B] [D] [D] [B] [N] [N]\n[D] [H] [L] [N] [N] [M] [D] [D] [B]\n 1 2 3 4 5 6 7 8 9 \n\nThese are the stacks, I hard code these because I can't be asked to parse through them properly\n\"\"\"\nstacks = [\n ['D','T','W','F','J','S','H','N'],\n ['H','R','P','Q','T','N','B','G'],\n ['L','Q','V'],\n ['N','B','S','W','R','Q'],\n ['N','D','F','T','V','M','B'],\n ['M','D','B','V','H','T','R'],\n ['D','B','Q','J'],\n ['D','N','J','V','R','Z','H','Q'],\n ['B','N','H','M','S']\n]\n\ninp = [line.strip() for line in open(\"2022/day05.txt\")]\ninstructions = [[int(line.split(' ')[1]), int(line.split(' ')[3]) - 1, int(line.split(' ')[5]) - 1] for line in inp]\n\ndef part1():\n local_stacks = [stack.copy() for stack in stacks.copy()]\n for instruction in instructions:\n to_move = list()\n for _ in range(instruction[0]):\n to_move.append(local_stacks[instruction[1]].pop())\n\n local_stacks[instruction[2]].extend(to_move)\n\n return ''.join([stack[-1] for stack in local_stacks])\n\ndef part2():\n local_stacks = [stack.copy() for stack in stacks.copy()]\n for instruction in instructions:\n to_move = list()\n for _ in range(instruction[0]):\n to_move.append(local_stacks[instruction[1]].pop())\n\n to_move.reverse()\n local_stacks[instruction[2]].extend(to_move)\n\n return ''.join([stack[-1] for stack in local_stacks])\n\n\nprint(part1())\nprint(part2())","repo_name":"BetterBelle/AdventOfCode","sub_path":"python/2022/day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30087740949","text":"\"\"\"Collect results of evals of different models into a single dataset\"\"\"\n\nimport argparse\nimport json\nimport os\n\nimport pandas as pd\nfrom vlne.args import Args\n\ndef parse_cmdargs():\n # pylint: disable=missing-function-docstring\n parser = argparse.ArgumentParser(\"Collect model evaluation results\")\n\n parser.add_argument(\n '-s', '--stat-file', dest = 'stat_file', required = True, type = str,\n help = 'File path inside MODELS directories where stats are stored'\n )\n\n parser.add_argument(\n '-o', '--outfile', dest = 'outfile', required = True, type = str,\n help = 'File to save results to'\n )\n\n parser.add_argument(\n '-v', '--var', required = True, dest = 'var', nargs = '+', type = str,\n help = 'Variable that is changed between models'\n )\n\n parser.add_argument(\n 'models', metavar = 'MODELS', nargs = '+', type = str,\n help = 'Directory with saved models'\n )\n\n return parser.parse_args()\n\ndef load_model_stats_list(models, stat_file, var_name):\n \"\"\"Load evaluation stats for `models`\"\"\"\n stats_list = []\n\n if len(var_name) == 1:\n var_name = var_name[0]\n\n for savedir in models:\n try:\n args = Args.load(savedir)\n stat_path = os.path.join(savedir, stat_file)\n stats = pd.read_csv(stat_path).to_dict(orient = 'records')\n\n var = args[var_name]\n\n for s in stats:\n s['gather_var'] = json.dumps(var)\n s['gather_name'] = json.dumps(var_name)\n\n except IOError:\n print(\"Failed to 
load model: %s\" % savedir)\n continue\n\n stats_list += stats\n\n return stats_list\n\ndef main():\n # pylint: disable=missing-function-docstring\n cmdargs = parse_cmdargs()\n stats_list = load_model_stats_list(\n cmdargs.models, cmdargs.stat_file, cmdargs.var\n )\n\n pd.DataFrame(stats_list).to_csv(cmdargs.outfile, index = False)\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"usert5432/vlne","sub_path":"scripts/eval/gather_evals.py","file_name":"gather_evals.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"34212285672","text":"import re\nimport uuid\nfrom collections import defaultdict\nfrom collections import namedtuple\n\nfrom converter.guides.tools import get_text_in_brackets\n\nCode = namedtuple('Code', ['name', 'source'])\n\n# Basic configuration - modify this to change output formatting\n_block_configuration = {\n \"chapter\": {\n \"markdown_heading\": \"##\",\n \"pretty_name\": \"\",\n \"show_count\": False\n },\n \"enumerate\": {\n \"line_indent_char\": \"\",\n \"list_heading\": \"1. \",\n \"markdown_heading\": \"\",\n \"pretty_name\": \"\",\n \"show_count\": False\n },\n \"exer\": {\n \"line_indent_char\": \"> \",\n \"markdown_heading\": \"####\",\n \"pretty_name\": \"Exercise\",\n \"show_count\": True\n },\n \"itemize\": {\n \"line_indent_char\": \"\",\n \"list_heading\": \"* \",\n \"markdown_heading\": \"\",\n \"pretty_name\": \"\",\n \"show_count\": False\n },\n \"description\": {\n \"line_indent_char\": \"\",\n \"list_heading\": \"\",\n \"markdown_heading\": \"\",\n \"pretty_name\": \"\",\n \"show_count\": False\n },\n \"lem\": {\n \"line_indent_char\": \"> \",\n \"markdown_heading\": \"####\",\n \"pretty_name\": \"Lemma\",\n \"show_count\": True\n },\n \"lstlisting\": {\n \"line_indent_char\": \" \",\n \"markdown_heading\": \"\",\n \"pretty_name\": \"\",\n \"show_count\": False\n },\n \"proof\": {\n \"line_indent_char\": \"\",\n \"markdown_heading\": \"####\",\n \"pretty_name\": \"Proof\",\n \"show_count\": False\n },\n \"prop\": {\n \"line_indent_char\": \"> \",\n \"markdown_heading\": \"####\",\n \"pretty_name\": \"Proposition\",\n \"show_count\": True\n },\n \"section\": {\n \"markdown_heading\": \"###\",\n \"pretty_name\": \"\",\n \"show_count\": False\n },\n \"subsection\": {\n \"markdown_heading\": \"####\",\n \"pretty_name\": \"\",\n \"show_count\": False\n },\n \"thm\": {\n \"line_indent_char\": \"> \",\n \"markdown_heading\": \"####\",\n \"pretty_name\": \"Theorem\",\n \"show_count\": True\n },\n \"quote\": {\n \"line_indent_char\": \"> \",\n \"markdown_heading\": \"\",\n \"pretty_name\": \"\",\n \"show_count\": False\n },\n \"quotation\": {\n \"line_indent_char\": \"> \",\n \"markdown_heading\": \"\",\n \"pretty_name\": \"\",\n \"show_count\": False\n }\n}\n\n\nclass LaTeX2Markdown(object):\n \"\"\"Initialise with a LaTeX string - see the main routine for examples of\n reading this string from an existing .tex file.\n To modify the outputted markdown, modify the _block_configuration variable\n before initializing the LaTeX2Markdown instance.\"\"\"\n\n def _make_paragraphs(self, lines):\n processed = []\n current = ''\n single_line = ('\\\\chapter', '\\\\section', '%', '\\\\index')\n is_multi_line = False\n for line in lines:\n if line.startswith(single_line):\n if current:\n processed.append(current)\n current = ''\n processed.append(line)\n continue\n elif line.startswith('\\\\begin'):\n if current:\n processed.append(current)\n current = ''\n 
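# added comment: a \\begin line opens a LaTeX environment; the flag set below keeps the following lines verbatim until the matching \\end is seen.\n 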
is_multi_line = True\n processed.append(line)\n continue\n elif not line:\n if current:\n processed.append(current)\n current = ''\n processed.append(line)\n continue\n\n if is_multi_line:\n processed.append(line)\n else:\n if current:\n current += ' ' + line\n else:\n current = line\n if line.startswith('\\\\end'):\n is_multi_line = False\n if current:\n processed.append(current)\n return processed\n\n def __init__(\n self, latex_array, refs={}, chapter_num=1, figure_num=0,\n exercise_num=0, remove_trinket=False, remove_exercise=False\n ):\n latex_string = '\\n'.join(self._make_paragraphs(latex_array))\n self._refs = refs\n self._chapter_num = chapter_num\n self._exercise_counter = 0\n self._figure_counter = 0\n self._figure_counter_offset = figure_num\n self._exercise_counter_offset = exercise_num\n self._block_configuration = _block_configuration\n self._latex_string = latex_string\n self._block_counter = defaultdict(lambda: 1)\n self._pdfs = []\n self._source_codes = []\n self._remove_trinket = remove_trinket\n self._remove_exercise = remove_exercise\n\n # Precompile the regexes\n\n # Select everything in the main matter\n self._main_re = re.compile(r\"\"\"\\\\begin{document}\n (?P
<main>
    .*)\n \\\\end{document}\"\"\",\n flags=re.DOTALL + re.VERBOSE)\n\n # Select all our block materials.\n self._block_re = re.compile(r\"\"\"\\\\begin{(?Pexer|proof|thm|lem|prop|quote|quotation)} # block name\n (\\[(?P.*?)\\])? # Optional block title\n (?P.*?) # Non-greedy block contents\n \\\\end{(?P=block_name)}\"\"\", # closing block\n flags=re.DOTALL + re.VERBOSE)\n\n # Select all our list blocks\n self._lists_re = re.compile(r\"\"\"\\\\begin{(?Penumerate|itemize|description)} # list name\n (\\[.*?\\])? # Optional enumerate settings i.e. (a)\n (?P.*?) # Non-greedy list contents\n \\\\end{(?P=block_name)}\"\"\", # closing list\n flags=re.DOTALL + re.VERBOSE)\n\n # Select all our code blocks\n self._code_re = re.compile(r\"\"\"\\\\begin{(?Pcode|stdout)}\n (?P.*?) # Non-greedy list contents\n \\\\end{(?P=block_name)}\"\"\", # closing list\n flags=re.DOTALL + re.VERBOSE)\n\n self._small_re = re.compile(r\"\"\"\\\\begin{small}\n (?P.*?) # Non-greedy list contents\n \\\\end{small}\"\"\", # closing list\n flags=re.DOTALL + re.VERBOSE)\n\n # Select all our code blocks\n self._trinket_re = re.compile(r\"\"\"\\\\begin{(?Ptrinket)}[\\[\\]0-9]*{(?P.*?)}\n (?P.*?) # Non-greedy list contents\n \\\\end{(?P=block_name)}\"\"\", # closing list\n flags=re.DOTALL + re.VERBOSE)\n\n self._exercise_re = re.compile(r\"\"\"\\\\begin{exercise}(.*?)\\n\n (?P.*?)\n \\\\end{exercise}\"\"\",\n flags=re.DOTALL + re.VERBOSE)\n\n self._figure_re = re.compile(r\"\"\"\\\\begin{figure}(.*?)\\n\n (?P.*?)\n \\\\end{figure}\"\"\", flags=re.DOTALL + re.VERBOSE)\n\n self._eqnarray_re = re.compile(r\"\"\"\\\\begin{(?Peqnarray\\*)}\n (?P.*?)\n \\\\end{(?P=block_name)}\"\"\", flags=re.DOTALL + re.VERBOSE)\n\n self._refs_re = re.compile(r\"\"\"\\\\ref{(?P.*?)}\"\"\", flags=re.DOTALL + re.VERBOSE)\n self._page_refs_re = re.compile(r\"\"\"\\\\pageref{(?P.*?)}\"\"\", flags=re.DOTALL + re.VERBOSE)\n\n # Select all our headers\n self._header_re = re.compile(r\"\"\"\\\\(?Pchapter|section|subsection) # Header\n \\**{(?P.*?)}\"\"\", # Header title\n flags=re.DOTALL + re.VERBOSE)\n\n self._table_re = re.compile(r\"\"\"\\\\begin{(?Ptable|tabular)} # block name\n (?P.*?) # Non-greedy block contents\n \\\\end{(?P=block_name)}\"\"\", # closing block\n flags=re.DOTALL + re.VERBOSE)\n\n def _replace_header(self, matchobj):\n \"\"\"Creates a header string for a section/subsection/chapter match.\n For example, \"### 2 - Integral Calculus\\n\" \"\"\"\n\n header_name = matchobj.group('header_name')\n header_contents = matchobj.group('header_contents')\n\n header = self._format_block_name(header_name)\n\n block_config = self._block_configuration[header_name]\n\n # If we have a count, separate the title from the count with a dash\n separator = \"-\" if block_config.get(\"show_count\") else \"\"\n\n output_str = \"{header} {separator} {title}\\n\".format(\n header=header,\n title=header_contents,\n separator=separator)\n\n return output_str\n\n def _replace_block(self, matchobj):\n \"\"\"Create a string that replaces an entire block.\n The string consists of a header (e.g. 
### Exercise 1)\n and a block, containing the LaTeX code.\n The block may be optionally indented, blockquoted, etc.\n These settings are customizable through the config.json\n file\"\"\"\n\n block_name = matchobj.group('block_name')\n block_contents = matchobj.group('block_contents')\n # Block title may not exist, so use .get method\n block_title = matchobj.groupdict().get('block_title')\n\n # We have to format differently for lists\n if block_name in {\"itemize\", \"enumerate\", \"description\"}:\n formatted_contents = self._format_list_contents(block_name,\n block_contents)\n else:\n formatted_contents = self._format_block_contents(block_name,\n block_contents)\n\n header = self._format_block_name(block_name, block_title)\n\n output_str = \"{header}\\n\\n{block_contents}\".format(\n header=header,\n block_contents=formatted_contents)\n return output_str\n\n def _format_block_contents(self, block_name, block_contents):\n \"\"\"Format the contents of a block with configuration parameters\n provided in the self._block_configuration attribute\"\"\"\n\n block_config = self._block_configuration[block_name]\n\n line_indent_char = block_config[\"line_indent_char\"]\n\n output_str = \"\"\n for line in block_contents.lstrip().rstrip().split(\"\\n\"):\n line = line.lstrip().rstrip()\n line = line.replace(\"\\\\\\\\\", \"
<br/>
    \")\n indented_line = line_indent_char + line + \"\\n\"\n output_str += indented_line\n return output_str\n\n def _format_list_contents(self, block_name, block_contents):\n block_config = self._block_configuration[block_name]\n\n list_heading = block_config[\"list_heading\"]\n\n output_str = \"\"\n for line in block_contents.lstrip().rstrip().split(\"\\n\"):\n line = line.lstrip().rstrip()\n line = line.replace(\"\\\\\\\\\", \"
    \")\n\n markdown_list_line = line.replace(r\"\\item\", list_heading)\n if block_name == \"description\":\n if \"\\\\term\" in line:\n markdown_list_line = markdown_list_line.replace(\"\\\\term\", list_heading)\n markdown_list_line = markdown_list_line.replace(\"{\", \"**\")\n markdown_list_line = markdown_list_line.replace(\"}\", \"**\")\n elif \"\\\\item\" in line:\n markdown_list_line = markdown_list_line.replace(\"[\", \"**\")\n markdown_list_line = markdown_list_line.replace(\"]\", \"**\")\n output_str += markdown_list_line + \"\\n\"\n if block_name == \"description\":\n output_str += \"\\n\"\n return output_str\n\n def _format_block_name(self, block_name, block_title=None):\n \"\"\"Format the Markdown header associated with a block.\n Due to the optional block_title, we split the string construction\n into two parts.\"\"\"\n\n block_config = self._block_configuration[block_name]\n pretty_name = block_config[\"pretty_name\"]\n show_count = block_config[\"show_count\"]\n markdown_heading = block_config[\"markdown_heading\"]\n\n block_count = self._block_counter[block_name] if show_count else \"\"\n self._block_counter[block_name] += 1\n\n output_str = \"{markdown_heading} {pretty_name} {block_count}\".format(\n markdown_heading=markdown_heading,\n pretty_name=pretty_name,\n block_count=block_count)\n\n if block_title:\n output_str = \"{output_str} ({block_title})\".format(\n output_str=output_str,\n block_title=block_title)\n\n return output_str.lstrip().rstrip()\n\n def _refs_block(self, matchobj):\n ref_name = matchobj.group('ref_name')\n refs = self._refs.get(ref_name, {'ref': ref_name})\n return '{}'.format(refs.get('ref', ''))\n\n def _page_refs_block(self, matchobj):\n ref_name = matchobj.group('ref_name')\n refs = self._refs.get(ref_name, {'pageref': ref_name})\n pageref = refs.get('pageref', '')\n if isinstance(pageref, str):\n return 'in section {}'.format(pageref)\n else:\n return str(pageref)\n\n def _eqnarray_block(self, matchobj):\n block_contents = matchobj.group('block_contents')\n block_contents = re.sub(r\"^&& {2}\", \"\", block_contents, flags=re.MULTILINE)\n block_contents = re.sub(r\"^& \", \"\", block_contents, flags=re.MULTILINE)\n block_contents = re.sub(r\" &$\", \"\", block_contents, flags=re.MULTILINE)\n block_contents = re.sub(r\" & \\\\\\\\$\", \" \\\\\\\\\\\\\\\\\", block_contents, flags=re.MULTILINE)\n return \"$${}$$\".format(block_contents, flags=re.MULTILINE)\n\n def _figure_block(self, matchobj):\n block_contents = matchobj.group('block_contents')\n self._figure_counter += 1\n\n images = []\n caption = 'Figure {}.{} '.format(\n self._chapter_num, self._figure_counter + self._figure_counter_offset\n )\n\n for line in block_contents.strip().split(\"\\n\"):\n if line.startswith(\"\\\\includegraphics\"):\n images.append(get_text_in_brackets(line))\n elif line.startswith(\"\\\\caption\"):\n caption += get_text_in_brackets(line)\n\n markdown_images = []\n\n for image in images:\n if image.lower().endswith('.pdf'):\n self._pdfs.append(image)\n image = image.replace('.pdf', '.jpg')\n markdown_images.append(\n \"![{}]({})\".format(caption, image)\n )\n\n return '{}\\n\\n**{}**'.format(\n '\\n'.join(markdown_images),\n caption\n )\n\n def _exercise_block(self, matchobj):\n block_contents = matchobj.group('block_contents')\n\n self._exercise_counter += 1\n prefix = \"**Exercise {}.{}:**\\n\"\\\n .format(self._chapter_num, self._exercise_counter + self._exercise_counter_offset)\n if self._remove_exercise:\n prefix = \"\"\n\n return prefix + block_contents\n\n 
def _format_table(self, matchobj):\n block_contents = matchobj.group('block_contents')\n\n out_str = \"\"\n caption = \"\"\n table = []\n\n for line in block_contents.strip().split(\"\\n\"):\n line = line.rstrip(\"\\\\\")\n if line == \"\\\\hline\":\n out_str += \"\\n\"\n continue\n elif line.startswith(\"\\\\end\") or line.startswith(\"\\\\begin\") or line.startswith(\"[!ht]\") or '&' not in line:\n continue\n elif line.startswith(\"\\\\caption\"):\n caption = line[9:-1]\n continue\n out_str += line\n table.append(line.split(' & '))\n\n heading = True\n out = \"\"\n\n if caption:\n out += \"**Table: \" + caption + \"**\\n\"\n\n for row in table:\n pos = 0\n for col in row:\n out += \"|\" + col.replace('|', '|')\n if heading:\n out += '|\\n'\n for _ in row:\n out += \"|-\"\n pos += 1\n heading = False\n out += '|\\n'\n\n return out\n\n def _code_block(self, matchobj):\n block_contents = matchobj.group('block_contents')\n try:\n file_name = matchobj.group('file_name')\n self._source_codes.append(Code(file_name, block_contents))\n except IndexError:\n pass\n block_name = matchobj.group('block_name')\n if self._remove_trinket and block_name == 'trinket':\n return ''\n # % in code block is not latex comments, escape it and replace later\n block_contents = re.sub(r\"%\", r\"\\\\%\", block_contents)\n return \"```code{}```\".format(block_contents)\n\n def _inline_code_block(self, matchobj):\n block_contents = matchobj.group('block_contents')\n block_contents = re.sub(r\"\\\\\\\\\", r\"\\\\\", block_contents)\n return \"`{}`\".format(block_contents)\n\n def _latex_to_markdown(self):\n \"\"\"Main function, returns the formatted Markdown as a string.\n Uses a lot of custom regexes to fix a lot of content - you may have\n to add or remove some regexes to suit your own needs.\"\"\"\n\n # Get main content, skipping preamble and closing tags.\n try:\n output = self._main_re.search(self._latex_string).group(\"main\")\n except AttributeError:\n output = self._latex_string\n\n # Reformat, lists, blocks, and headers.\n output = self._lists_re.sub(self._replace_block, output)\n output = self._block_re.sub(self._replace_block, output)\n output = self._header_re.sub(self._replace_header, output)\n output = self._table_re.sub(self._format_table, output)\n\n # Fix emph, textbf, texttt formatting\n output = re.sub(r\"\\\\emph{(.*?)}\", r\"*\\1*\", output)\n output = re.sub(r\"\\\\textbf{(.*?)}\", r\"**\\1**\", output)\n output = re.sub(r\"\\\\texttt{(.*?)}\", r\"`\\1`\", output)\n\n output = re.sub(r\"{\\\\em (.*?)}\", r\"*\\1*\", output)\n output = re.sub(r\"{\\\\it (.*?)}\", r\"*\\1*\", output)\n output = re.sub(r\"{\\\\bf (.*?)}\", r\"**\\1**\", output)\n output = re.sub(r\"{\\\\sf (.*?)}\", r\"**\\1**\", output)\n\n # Fix ``\n output = re.sub(\"``\", \"“\", output)\n\n # Fix ``\n output = re.sub(\"''\", \"”\", output)\n\n output = self._code_re.sub(self._code_block, output)\n output = self._trinket_re.sub(self._code_block, output)\n output = self._small_re.sub(r\"\\1\", output)\n\n # Fix \\% formatting\n percent_token = str(uuid.uuid4())\n output = re.sub(r\"\\\\%\", percent_token, output)\n output = re.sub(\"%(.*?)$\", \"\", output, flags=re.MULTILINE)\n output = re.sub(percent_token, \"%\", output)\n\n output = self._exercise_re.sub(self._exercise_block, output)\n output = self._figure_re.sub(self._figure_block, output)\n output = self._refs_re.sub(self._refs_block, output)\n output = self._page_refs_re.sub(self._page_refs_block, output)\n output = self._eqnarray_re.sub(self._eqnarray_block, output)\n\n 
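# added note (illustrative): the ad-hoc re.compile calls below all route inline code spans -- \\java{...} and both \\verb quoting styles -- through _inline_code_block.\n 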
output = re.compile(r\"\\\\java{(?P.*?)}\").sub(self._inline_code_block, output)\n output = re.compile(r\"\\\\verb\\\"(?P.*?)\\\"\").sub(self._inline_code_block, output)\n output = re.compile(r\"\\\\verb'(?P.*?)'\").sub(self._inline_code_block, output)\n\n output = re.sub(r\"\\\\url{(.*?)}\", r\"[\\1](\\1)\", output)\n\n output = re.sub(r\"\\\\href{(.*?)}{(\\\\[a-z]+)?\\s?(.*?)}\", r\"[\\1](\\3)\", output)\n\n output = re.sub(r\"{\\\\tt (.*?)}\", r\"`\\1`\", output)\n\n output = re.sub(r\"\\\\\\[ (.*?) \\\\\\]\", r\"$ \\1 $\", output)\n\n output = re.sub(r\"\\\\'{(.*?)}\", r\"\\1́\", output)\n\n output = re.sub(r\"(\\S+)(~)(\\S+)\", r\"\\1 \\3\", output)\n output = re.sub(r\"(~)(\\S+)\", r\" \\2\", output)\n output = re.sub(r\"(\\S+)(~)\", r\"\\1 \", output)\n\n output = re.sub(r\"^\\\\\\\\ \", \"
<br/>
    \", output, flags=re.MULTILINE)\n\n return output.lstrip().rstrip()\n\n def to_markdown(self):\n return self._latex_to_markdown()\n\n def to_latex(self):\n return self._latex_string\n\n def get_pdfs_for_convert(self):\n return self._pdfs\n\n def get_figure_counter(self):\n return self._figure_counter\n\n def get_exercise_counter(self):\n return self._exercise_counter\n\n def get_source_codes(self):\n return self._source_codes\n","repo_name":"codio-content/ThinkJava","sub_path":"codio-student-unit-exclude/book-converter/converter/latex2markdown.py","file_name":"latex2markdown.py","file_ext":"py","file_size_in_byte":20900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21114293609","text":"import functools\nfrom typing import Any, Optional, Sequence, Tuple\n\nfrom learned_optimization import notebook_utils as nu\nfrom learned_optimization.baselines import utils\nfrom matplotlib import pyplot as plt\nimport numpy as onp\n\n\n@functools.lru_cache(None)\ndef _best_curve(\n set_name: str,\n task: str,\n select_best_with: str = \"last\"\n) -> Tuple[onp.ndarray, onp.ndarray, Tuple[onp.ndarray, onp.ndarray]]:\n \"\"\"Load the best learning curve for given hparam set and task name.\"\"\"\n archive = utils.load_archive(task, set_name)\n\n if select_best_with == \"last\":\n last_val = onp.mean(archive[\"eval/train/loss\"][:, :, -1:], axis=2)\n else:\n raise NotImplementedError()\n\n last_val = onp.mean(last_val, axis=1)\n last_val[onp.isnan(last_val)] = 99999999999\n best_lr_idx = onp.argmin(last_val)\n\n mean_curve = onp.mean(archive[\"eval/train/loss\"][best_lr_idx, :, :], axis=0)\n max_curve = onp.max(archive[\"eval/train/loss\"][best_lr_idx, :, :], axis=0)\n min_curve = onp.min(archive[\"eval/train/loss\"][best_lr_idx, :, :], axis=0)\n xs = archive[\"eval/xs\"][0, 0]\n return xs, mean_curve, (min_curve, max_curve)\n\n\n@functools.lru_cache(None)\ndef load_curves(\n task: str, sets: Sequence[str]\n) -> Sequence[Tuple[onp.ndarray, onp.ndarray, Tuple[onp.ndarray, onp.ndarray]]]:\n \"\"\"Load all learning curves for task and a list of hparam set names.\"\"\"\n return nu.threaded_tqdm_map(30, functools.partial(_best_curve, task=task),\n sets)\n\n\ndef plot_tasks_and_sets(\n task: str,\n opt_sets: Sequence[str],\n ax: Optional[Any] = None,\n alpha_on_confidence: float = 0.1,\n confidence_alpha: float = 1.0,\n ema: float = 0.9,\n initial_shift: int = 500,\n legend: bool = True,\n labels: Optional[Sequence[str]] = None,\n colors: Optional[Sequence[Any]] = None,\n always_include_first_opt=False,\n):\n \"\"\"Plot performance of a task with respect to the best of each hparam set.\n\n Args:\n task: Name of task to plot.\n opt_sets: List of hparam sets with data precomputed to plot.\n ax: axis to plot onto. If not set, a new figure is created.\n alpha_on_confidence: Alpha value of the confidence interval.\n confidence_alpha: Alpha on lines surrounding confidence fill_between.\n ema: ema value to smooth values with.\n initial_shift: Used to set the max ylim.\n legend: to plot a legend or not.\n labels: Labels to plot in legend. 
If None, use opt_sets.\n always_include_first_opt: Ensure that the y-lim always includes the first optimizer.\n \"\"\"\n if colors is None:\n colors = nu.colors_for_num(len(opt_sets))\n\n curves = load_curves(task, tuple(opt_sets))\n\n vmax = 9999999999\n best_vals = []\n ylim_top_vals = []\n\n if ax is None:\n unused_fig, ax = plt.subplots()\n\n if labels is None:\n labels = opt_sets\n else:\n assert len(labels) == len(opt_sets)\n\n for oi, label in enumerate(labels):\n xs, curve, (min_c, max_c) = curves[oi]\n curve = nu.ema(curve, ema)\n min_c = nu.ema(min_c, ema)\n max_c = nu.ema(max_c, ema)\n\n ax.plot(xs, curve, label=label, color=colors[oi], lw=2)\n if alpha_on_confidence:\n ax.fill_between(\n xs, min_c, max_c, color=colors[oi], alpha=alpha_on_confidence)\n ax.plot(xs, min_c, color=colors[oi], lw=0.3, alpha=confidence_alpha)\n ax.plot(xs, max_c, color=colors[oi], lw=0.3, alpha=confidence_alpha)\n best_vals.append(onp.nanmin(min_c))\n shift = (onp.argmax(xs > initial_shift))\n ylim_top_vals.append(max_c[shift])\n\n vmin = onp.nanmin(best_vals)\n vmax = onp.nanmin(ylim_top_vals)\n if always_include_first_opt:\n vmax = onp.maximum(ylim_top_vals[0], vmax)\n ax.set_ylim(vmin, vmax)\n ax.set_title(task)\n if legend:\n nu.legend_to_side(ax=ax)\n","repo_name":"google/learned_optimization","sub_path":"learned_optimization/baselines/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":702,"dataset":"github-code","pt":"81"} +{"seq_id":"34020052803","text":"#Candelabra ROOM\ndef candelabra():\n from random import random\n from random import randint\n from time import sleep\n #INSTRUCTIONS/INTRO\n #print(\"Complete following task to move on to next level: \")\n #print('-'*50)\n #print()\n #print(\"There is a 5-candle candelabra that has at least one candle is lit at any \\ngiven time. The objective is to have ALL candles lit.\")\n #print(\"***Be warned*** You only have 10 MATCHES and remember that candles \\nto either side will switch to either ON or OFF.\")\n #print(\"If you pick FIRST(1) or LAST(5) candle, candle on the OPPOSITE end will switch, \\nas well as the one NEXT your candle.\")\n #print('-'*50)\n #print()\n # RANDOMLY STARTING CANDELABRA SEQUENCE (chooses candles that are ON or OFF)\n #userCandelabra is user-viewed candelabra\n userCandelabra=[]\n on=1\n off=0\n #candelabra is running in the back ground, calculating on or off candles \n candelabra=[]\n valid=False\n \n while not valid:\n for i in range(0,5):\n #picks ON or OFF \n state=round(random())\n if state==on:\n candelabra+=[on]\n userCandelabra+=['Candle '+str(i+1)+': ON']\n else:\n candelabra+=[off]\n userCandelabra+=['Candle '+str(i+1)+': OFF']\n print()\n print(\"You chose to climb the stairs to the second level of the mansion.\")\n print(\"As you walk up the steps, you notice that the only light shining \")\n print(\"on the second level is a 5-candle candelabra sitting on a table.\")\n print(\"Only %d of its candles are lit...\" %(candelabra.count(1)))\n print()\n sleep(3)\n print(\"Everything else is consumed by an immense darkness...\")\n print()\n sleep(3)\n print(\"You decide that it would be a good idea to take the candelabra with you.\")\n print(\"However, as you try to pick it up the candelabra appears to be cemented\")\n print(\"to the table by some force...\")\n print(\"The voice returns...\")\n print()\n sleep(10)\n print('\"Light the candles... 
Traverse the darkness...\"')\n print()\n sleep(3)\n print(\"After the voice fades you notice 10 matches in a matchbox are also\")\n print(\"sitting on the table.\")\n print()\n sleep(3)\n print(\"You pick up the matchbox and begin to light the candles.\")\n print('\\n'*2)\n sleep(3)\n #PROOFING THAT NO CANDELABRA IS ALL (ON) OR ALL (OFF)\n if 1 in candelabra and 0 in candelabra:\n valid=True\n for i in userCandelabra:\n print(i)\n print()\n elif 1 in candelabra:\n userCandelabra.pop()\n candelabra.pop()\n candelabra.append(0)\n userCandelabra.append('Candle 5: OFF')\n valid = True\n for i in userCandelabra:\n print(i)\n print()\n else:\n userCandelabra.pop()\n candelabra.pop()\n candelabra.append(1)\n userCandelabra.append('Candle 5: ON')\n for i in userCandelabra:\n print(i)\n print()\n valid=True\n \n #MATCHES AND TRIES \n matches=10\n tries=0\n #looping until all candles are ON\n while 0 in candelabra:\n # looping until 10 matches are used\n while 0]*?)href=\\\"([^\\\"]*)\\\"\", contents)\n pages[filename] = set(links) - {filename}\n\n # Only include links to other pages in the corpus\n for filename in pages:\n pages[filename] = set(\n link for link in pages[filename]\n if link in pages\n )\n\n return pages\n\n\ndef transition_model(corpus, page, damping_factor):\n \"\"\"\n Return a probability distribution over which page to visit next,\n given a current page.\n\n With probability `damping_factor`, choose a link at random\n linked to by `page`. With probability `1 - damping_factor`, choose\n a link at random chosen from all pages in the corpus.\n \"\"\"\n # Get total Number of links and create our return dictionary\n n = len(corpus)\n probability_distribution = dict()\n for i in corpus:\n probability_distribution[i]=0\n\n # Add probabilities of links that are linked to by the page\n direct_links = corpus[page]\n n2 = len(direct_links)\n for j in direct_links:\n probability_distribution[j]=round (damping_factor * 1 / n2,4)\n \n # Add probabilities coming from the fact that a random page might be selected\n if n2 == 0:\n for k in probability_distribution:\n probability_distribution[k] = round ((1 / n),4)\n else:\n for k in probability_distribution:\n probability_distribution[k] += round(((1-damping_factor) * 1 / n),4)\n\n return probability_distribution\n\ndef sample_pagerank(corpus, damping_factor, n):\n \"\"\"\n Return PageRank values for each page by sampling `n` pages\n according to transition model, starting with a page at random.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). 
All\n PageRank values should sum to 1.\n \"\"\"\n \n page_rank = dict()\n chance = list()\n model = dict()\n values = list()\n # Create all pages with initial rank set to 0\n for i in corpus:\n page_rank [i] = 0\n\n # Generate first random page and add it to total number of pages for that page\n previous_page = random.choice(list(page_rank.keys()))\n page_rank [previous_page]+=1\n\n # Get transition model of the previous page\n model = transition_model(corpus, previous_page, damping_factor)\n \n\n for j in range(n-1):\n \n # Generate a random page based on likelihood and add that page to total\n \n values = list(model.keys())\n chance = [model[x] for x in model]\n previous_page = random.choices(values, weights = chance, k = 1)\n previous_page_use = previous_page[0]\n page_rank[previous_page_use] +=1\n\n # Get transition model\n model.clear()\n model = transition_model(corpus, previous_page_use, damping_factor)\n\n total = 0\n\n # Get total number of pages\n for k in page_rank.keys():\n total += page_rank[k]\n \n if (total != n ):\n raise NotImplementedError \n \n for k in page_rank.keys():\n page_rank[k] = round((page_rank[k] / n),4)\n \n return page_rank\n\n \n\n\n\ndef iterate_pagerank(corpus, damping_factor):\n \"\"\"\n Return PageRank values for each page by iteratively updating\n PageRank values until convergence.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). All\n PageRank values should sum to 1.\n \"\"\"\n iterative_rank = dict()\n n = len (corpus)\n pr = dict()\n \n\n # Create the dictionary for page ranks and assign initial ranks\n for i in corpus:\n iterative_rank [i] = (1/n)\n iteration = 0\n while (iteration == 0):\n # Go through each webpage\n for k in iterative_rank:\n sigma = 0\n \n # Add random part\n pr [k] = (1-damping_factor)/n\n\n # go through each i page and add to total probability if correct\n for summation in corpus:\n if (k in corpus[summation]):\n n2 = len (corpus[summation])\n sigma += iterative_rank [summation]/n2\n \n pr [k] += damping_factor * sigma\n # See if we need to keep iterating\n for m in pr:\n temp = abs((pr[m]-iterative_rank[m]))\n if (temp>0.001):\n iteration +=1\n \n if (iteration>0):\n iteration = 0\n iterative_rank = pr.copy()\n else:\n iteration = 1000000\n iterative_rank = pr.copy()\n\n return iterative_rank\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"henrykot/pagerank","sub_path":"main/pagerank/pagerank.py","file_name":"pagerank.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5990118391","text":"'''\nnvr: helper utils for nvim remote feature\n'''\nimport os\nimport sys\n\nimport argparse\nimport logging\nimport pathlib\nimport subprocess\n\n\ndef parse_arguments():\n '''\n parse command line arguments\n '''\n parser = argparse.ArgumentParser(prog='nvr')\n parser.add_argument('--version', action='version', version='%(prog)s 1.0')\n\n parser.add_argument(\"-d\",\n \"--debug\",\n help=\"print debug information\",\n action=\"count\",\n default=0)\n parser.add_argument(\n \"--server-name\",\n help=\n \"remote server name to connect to, if not specified, NVIM_LISTEN_ADDRESS env will be used\",\n type=str,\n required=False,\n default=None)\n parser.add_argument(\"--no-start\",\n help=\"do not start new instance if no server found\",\n action=\"store_true\",\n default=False)\n parser.add_argument(\n \"--editor\",\n help=\"editor to use when start new 
instance, default use nvim\",\n        required=False,\n        type=str,\n        default='nvim')\n    parser.add_argument(\"addition_args\",\n                        type=str,\n                        metavar=\"\",\n                        nargs=\"*\",\n                        help=\"file list to open\")\n\n    return parser.parse_args()\n\n\ndef get_server_name(args):\n    if args.server_name:\n        return args.server_name\n\n    try:\n        return os.environ['NVIM_LISTEN_ADDRESS']\n    except KeyError:\n        logging.error('unable to find server name')\n        sys.exit(1)\n\n\ndef find_server(server_name):\n    cmd_line = ['nvim', '--server', server_name, '--remote-expr', 'version']\n\n    logging.debug('find_server, cmd line:%s' % cmd_line)\n    try:\n        proc = subprocess.run(cmd_line, check=True, capture_output=True, timeout=3)\n        logging.debug('remote version:%s' % proc.stdout)\n    except subprocess.CalledProcessError:\n        return False\n    except subprocess.TimeoutExpired:\n        return False\n    return True\n\n\ndef get_editor_cmdline(editor):\n    return editor.split(' ')\n\n\ndef norm_addition_args(addition_args):\n    return map(\n        lambda x: x\n        if x.startswith('+') else pathlib.Path(x).resolve().as_posix(),\n        addition_args)\n\n\ndef main():\n    args = parse_arguments()\n\n    if args.debug > 0:\n        logging.getLogger('').setLevel(logging.DEBUG)\n    else:\n        logging.getLogger('').setLevel(logging.INFO)\n\n    server_name = get_server_name(args)\n\n    start_server = False\n\n    if not find_server(server_name):\n        if args.no_start:\n            logging.error('unable to locate server %s' % server_name)\n            sys.exit(2)\n\n        start_server = True\n\n    cmd_line = get_editor_cmdline(args.editor) if start_server else ['nvim']\n\n    cmd_line.append('--listen' if start_server else '--server')\n    cmd_line.append(server_name)\n\n    if not start_server:\n        cmd_line.append('--remote')\n\n    cmd_line.extend(norm_addition_args(args.addition_args))\n\n    logging.debug('running cmd line:%s' % cmd_line)\n    subprocess.run(cmd_line, check=True, shell=False)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"stonewell/nvr","sub_path":"src/nvr/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70749955146","text":"import logging\r\nfrom logging.handlers import SMTPHandler\r\nfrom logging.config import dictConfig\r\nfrom logging.handlers import RotatingFileHandler\r\nfrom flask.logging import default_handler\r\n\r\n\r\ndictConfig(\r\n    {\r\n        \"version\": 1,\r\n        \"formatters\": {\r\n            \"default\": {\r\n                \"format\": \"[%(asctime)s] %(levelname)s in %(module)s: %(message)s\",\r\n            }\r\n        },\r\n        \"handlers\": {\r\n            \"wsgi\": {\r\n                \"class\": \"logging.StreamHandler\",\r\n                \"stream\": \"ext://flask.logging.wsgi_errors_stream\",\r\n                \"formatter\": \"default\",\r\n            }\r\n        },\r\n        \"root\": {\"level\": \"INFO\", \"handlers\": [\"wsgi\"]},\r\n    }\r\n)\r\n\r\nmail_handler = SMTPHandler(\r\n    mailhost=\"127.0.0.1\",\r\n    fromaddr=\"server-error@ohc.io\",\r\n    toaddrs=[\"admin@ohc.io\"],\r\n    subject=\"Application Error\",\r\n)\r\n\r\nmail_handler.setLevel(logging.ERROR)\r\nmail_handler.setFormatter(\r\n    logging.Formatter(\"[%(asctime)s] %(levelname)s in %(module)s: %(message)s\")\r\n)\r\n\r\nfilehandler = RotatingFileHandler(\"./.logs/app.log\", maxBytes=10000000, backupCount=1)\r\n\r\n\r\ndef set_logger_handlers(app):\r\n    for logger in (\r\n        app.logger,\r\n        logging.getLogger(\"werkzeug\"),\r\n        logging.getLogger(\"sqlalchemy\"),\r\n    ):\r\n        if not app.debug:\r\n\r\n            logger.addHandler(filehandler)\r\n            logger.addHandler(mail_handler)\r\n        else:\r\n            logger.addHandler(default_handler)\r\n            
logger.addHandler(mail_handler)\r\n","repo_name":"Lorioux/template-openhealthcare","sub_path":"openhcs/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34451227526","text":"import math\n\n\n# Question 1\ndef max_of_three_numbers():\n    a = int(input())\n    b = int(input())\n    c = int(input())\n    numbers = [a, b, c] \n    print(max(numbers))\n\n\n# Question 2\ndef sum_of_numbers_in_a_list(): \n    list = []\n    number = int(input('How many numbers: '))\n    for n in range(number):\n        numbers = int(input('Enter number '))\n        list.append(numbers)\n    print(\"Sum of numbers in given list is :\", sum(list))\n\n\n# Question 3\n# def multiple_of_numbers_in_a_list():\n#     list = []\n#     number = int(input('How many numbers: '))\n#     for n in range(number):\n#         numbers = int(input('Enter number '))\n#         list.append(numbers)\n#     print(\"Sum of numbers in given list is :\", math.prod(list))\n# or\ndef multiple_of_numbers_in_a_list(list):\n    print(\"Product of numbers in given list is :\", math.prod(list))\n\n# Question 4\n# def reverse_a_string():\n#     string = input(\"Enter a string: \")\n#     string = string.lower()\n#     print(string[::-1])\ndef reverse_a_string():\n    string = input('Tell me a word of your choice: ')\n    last = len(string)\n    for i in range(last): \n        print(last-1-i)\n    for i in range(last):\n        print(string[last-1-i])\n    rast = \"\"\n    for i in range(last):\n        rast += string[last-1-i]\n    print(rast)\n\n# Question 5\n\ndef factorial_of_a_number():\n    number = int(input(\"enter the number you want to find the factorial\\n\"))\n    count = 1\n    factorial = 1\n    while count <= number:\n        factorial = count * factorial\n        count += 1\n    print(\"The factorial of \", number, \"is \", factorial)\n\n\n# Question 6\ndef check_whether_a_number_falls_in_a_given_range():\n    number = int(input(\"enter a number\\n\"))\n    if number in range(3,9):\n        print(number, \"is in the range 3 to 9\")\n    else :\n        print(\"The number is outside the given range.\")\n\n# Question 7\ndef calculate_number_of_upper_case_letters_and_lower_case_letters():\n    string=input(\"Enter string:\")\n    count1=0\n    count2=0\n    for i in string:\n        if(i.islower()):\n            count1=count1+1\n        elif(i.isupper()):\n            count2=count2+1\n    print(\"The number of lowercase characters is:\")\n    print(count1)\n    print(\"The number of uppercase characters is:\")\n    print(count2)\n    \n# Question 8\n\n# Question 10\ndef even_number(list):\n    for n in list:\n        if n % 2 == 0:\n            print(n)","repo_name":"everybees/parsel_tongue","sub_path":"itunu/functions.py/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"1236143256","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 28 15:31:53 2014\n\n@author: edouard.duchesnay@cea.fr\n@license: BSD-3-Clause\n\"\"\"\nimport optparse, os.path, sys\nimport nibabel, numpy as np\n\n# import modules\nfrom nsap.use_cases.utils.brainvisa_map_cluster_analysis import *\nfrom nsap.plugins.nipype import set_environment\n#\n\n\nBASE_PATH = '/neurospin/brainomics/2013_imagen_anat_vgwas_gpu'\nDATA_DIR = os.path.join(BASE_PATH, '2013_imagen_bmi', 'data')\nMASK_FILE = os.path.join(BASE_PATH, '2013_imagen_bmi', 'data', 'mask', 'mask.nii')\n#INPUT_IMAGE = os.path.join(BASE_PATH, \"interesting_snp_brain_img\", \"snp_379105_perm_pcorr.nii.gz\")\nOUTPUT_DIR = \"/tmp/mesh\"\n# set image of weights and target volume\nmask_im = nibabel.load(MASK_FILE)\nmask = mask_im.get_data() 
!= 0\n\n\n#INPUT_IMAGE = os.path.join(BASE_PATH, \"tv/split_vizu/1-0.1-0.1-0.8_beta.nii\")\n\n#############################################################################\n## READ INPUT IMAGE\n#############################################################################\n\nparser = optparse.OptionParser(description=__doc__)\nparser.add_option('--input',\n    help='Input image', type=str)\n\noptions, args = parser.parse_args(sys.argv)\n\nINPUT_IMAGE = options.input\n\n\n\n#############################################################################\n## Mesh\n#############################################################################\nset_environment(set_matlab=False, set_brainvisa=True)\ntarget = get_sample_data(\"mni_1mm\").brain\n\noutputs = {}\noutputs.update( do_brainvisa_mesh_cluster(OUTPUT_DIR, INPUT_IMAGE))\n#                             thresh_neg_bound=(-np.inf,-0),\n#                             thresh_pos_bound=(0, np.inf)) )\n \n# run render\ndo_mesh_cluster_rendering(mesh_file = outputs[\"mesh_file\"],\n                          texture_file = outputs[\"cluster_file\"],\n                          white_mesh_file = get_sample_data(\"mni_1mm\").mesh,\n                          anat_file = target)\n\n\"\"\"\nbv_env python weigths_maps.py --input=/neurospin/brainomics/2013_imagen_anat_vgwas_gpu/interesting_snp_brain_img/snp_379105_perm_pcorr.nii.gz\nbv_env python weigths_maps.py --input=/neurospin/brainomics/2013_imagen_anat_vgwas_gpu/interesting_snp_brain_img/snp_122664_perm_pcorr.nii.gz\n\ncoordi\n\"\"\"\n","repo_name":"neurospin/scripts","sub_path":"2013_imagen_anat_vgwas_gpu/scripts/10_vizu/weigths_maps.py","file_name":"weigths_maps.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12865761170","text":"import boto3\nimport os, sys\nfrom boto3.dynamodb.conditions import Key,Attr\nfrom commonLambdaFunctions import fetchFromTransitConfigTable, publishToSns\nimport logging\nimport rebalance\nimport pan_vpn_generic\n#transitConfig = os.environ['transitConfigTable']\n#transitConfigTable = 'TransitConfig'\n#region = 'us-east-1'\ntransitConfigTable = os.environ['transitConfigTable']\nregion = os.environ['Region']\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ndef updateTransitConfig(tableName, data):\n    \"\"\"Updates the TransitConfig table with RebalanceStatus\n    \"\"\"\n    try:\n        dynamodb = boto3.resource('dynamodb', region_name=region)\n        table = dynamodb.Table(tableName)\n        table.put_item(Item=data)\n        logger.info(\"Updating Transit Config with RebalanceStatus with data: {}\".format(data))\n    except Exception as e:\n        logger.error('updateTransitConfig() failed, Error: {}'.format(str(e)))\n\ndef getSubscriberDataFromVpcTable(tableName, fromPaGroupName):\n    \"\"\"Returns an item from Transit VpcTable by filtering the table with attribute PaGroupName\n    \"\"\"\n    try:\n        dynamodb = boto3.resource('dynamodb', region_name=region)\n        table = dynamodb.Table(tableName)\n        response = table.scan(FilterExpression=Attr('PaGroupName').eq(fromPaGroupName))\n        if 'Items' in response:\n            return response['Items'][0]\n        else:\n            logger.error('No data received for FromPaGroup: {} from VpcTable, hence exiting'.format(fromPaGroupName))\n            sys.exit(0)\n    except Exception as e:\n        logger.error(\"Error from getSubscriberDataFromVpcTable(), Error: {}\".format(str(e)))\n\ndef checkVpcIdInVpcTable(tableName, vpcId):\n    \"\"\"Returns an Item from Transit VpcTable by querying the table with VpcId\n    \"\"\"\n    try:\n        dynamodb = boto3.resource('dynamodb', region_name=region)\n        table = dynamodb.Table(tableName)\n        response = 
table.query(KeyConditionExpression=Key('VpcId').eq(vpcId))\n        return response\n    except Exception as e:\n        logger.error(\"Error from checkVpcIdInVpcTable(), Error: {}\".format(str(e)))\n\ndef getInUsePaGroups(tableName, maxCount):\n    \"\"\"Returns list of PaGroups which are InUse==YES \n    \"\"\"\n    try:\n        dynamodb = boto3.resource('dynamodb', region_name=region)\n        table = dynamodb.Table(tableName)\n        response = table.scan(FilterExpression=Attr('InUse').eq('YES') & Attr('VpcCount').lt(maxCount))\n        logger.info(\"PaGroup Info scan result with Filter InUse=YES and VpcCount < {} is: {}\".format(maxCount, response))\n        return response['Items']\n    except Exception as e:\n        logger.error(\"Error from getInUsePaGroups(), Error: {}\".format(str(e)))\n\ndef lambda_handler(event, context):\n    logger.info(\"Got Event: {}\".format(event))\n    config = fetchFromTransitConfigTable(transitConfigTable)\n    if config:\n        response = getInUsePaGroups(config['TransitPaGroupInfo'], int(config['PaGroupMaxVpc']))\n        if response:\n            if config['RebalanceInProgress']=='True':\n                if config['RebalanceStatus']=='Done':\n                    apiKey = pan_vpn_generic.getApiKey(response[0]['N1Mgmt'], config['UserName'], config['Password'])\n                    result = rebalance.rebalance(apiKey, response, int(config['PaGroupMaxVpc']), config)\n                    if result:\n                        # Get the VGW, Region, SubscriberSnsArn and SubscriberAssumeRoleArn from VpcTable\n                        subscriberData = getSubscriberDataFromVpcTable(config['TransitVpcTable'], result['FromPaGroup']['PaGroupName'])\n                        result['FromPaGroup']['VpcCount'] = str(result['FromPaGroup']['VpcCount'])\n                        result['ToPaGroup']['VpcCount'] = str(result['ToPaGroup']['VpcCount'])\n                        value = {\n                            'FromPaGroupName': result['FromPaGroup']['PaGroupName'],\n                            'ToPaGroupName': result['ToPaGroup']['PaGroupName'],\n                            'VpcId': subscriberData['VpcId'],\n                            'VpcCidr': subscriberData['VpcCidr'],\n                            'Region': subscriberData['Region'],\n                            'SubscriberSnsArn': subscriberData['SubscriberSnsArn'],\n                            'SubscriberAssumeRoleArn' : subscriberData['SubscriberAssumeRoleArn'],\n                            'CreateStatus': 'Pending',\n                            'DeleteStatus':'InProgress'\n                        }\n                        item = {'Property': 'RebalanceStatus', 'Value':value}\n                        updateTransitConfig(transitConfigTable, item)\n                        # Send DeleteOperation first\n                        deleteData = {\n                            'Action': 'DeleteVpnConnection',\n                            'VpcId': subscriberData['VpcId'],\n                            'Region': subscriberData['Region'],\n                            'Rebalance': 'True'\n                        }\n                        #Publish message to Transit SNS\n                        publishToSns(subscriberData['SubscriberSnsArn'], deleteData, subscriberData['SubscriberAssumeRoleArn'])\n                        logger.info(\"Published message to Subscriber SNS with data: {}\".format(deleteData))\n                        return\n                else:\n                    previousTaskStatus = config['RebalanceStatus']\n                    if previousTaskStatus['DeleteStatus']=='InProgress':\n                        vpcStatus = checkVpcIdInVpcTable(config['TransitVpcTable'], previousTaskStatus['VpcId'])\n                        logger.info(\"Got VPC Status: {}\".format(vpcStatus))\n                        if len(vpcStatus['Items'])>0:\n                            if vpcStatus['Items'][0]['PaGroupName']==previousTaskStatus['FromPaGroupName']:\n                                logger.info(\"Previous Delete VPN Operation is still InProgress, hence exiting from the process\")\n                                return\n                        else:\n                            # Create FetchVpnServerDetails and send to Subscriber SNS\n                            previousTaskStatus['CreateStatus'] = 'InProgress'\n                            previousTaskStatus['DeleteStatus'] = 'Completed'\n                            item = {'Property': 'RebalanceStatus', 'Value':previousTaskStatus}\n                            updateTransitConfig(transitConfigTable, item)\n\n                            data = {\n                                'Action': 'FetchVpnServerDetails',\n                                'Region': previousTaskStatus['Region'],\n                                'VpcId': previousTaskStatus['VpcId'],\n                                'SubscriberAssumeRoleArn': 
previousTaskStatus['SubscriberAssumeRoleArn'],\n                                'SubscriberSnsArn': previousTaskStatus['SubscriberSnsArn'],\n                                'VpcCidr': previousTaskStatus['VpcCidr'],\n                                'Rebalance': 'True'\n                            }\n                            #Publish message to Transit SNS\n                            publishToSns(config['TransitSnsArn'], data)\n                            logger.info(\"Published message to Transit SNS with data: {}\".format(data))\n                            return\n                elif previousTaskStatus['CreateStatus']=='InProgress':\n                    logger.info(\"Previous task was CreateTask, now check whether it has completed or not\")\n                    vpcStatus = checkVpcIdInVpcTable(config['TransitVpcTable'], previousTaskStatus['VpcId'])\n                    logger.info(\"Got VPC Status: {}\".format(vpcStatus))\n                    if not vpcStatus['Items']:\n                        logger.info(\"Create Task is still in progress, hence exiting\")\n                        return\n                    else:\n                        if vpcStatus['Items'][0]['PaGroupName'] == previousTaskStatus['ToPaGroupName']:\n                            logger.info(\"Previous Rebalance task Completed successfully, updating the RebalanceStatus=Done\")\n                            item = {'Property': 'RebalanceStatus', 'Value':'Done'}\n                            updateTransitConfig(transitConfigTable, item)\n                            return\n                        else:\n                            logger.error(\"Something terrible happened? Unknown status, Stop StateMachine and Exit\")\n                            #Something terrible happened? Unknown status, Stop StateMachine and Exit\n                            return\n        else:\n            logger.info(\"No PaGroups for Rebalancing, PaGroups are Optimal\")\n    else:\n        logger.error(\"No data received from TransitConfig table\")\n","repo_name":"PaloAltoNetworks/aws-transit-vpc","sub_path":"lambda/rebalancePaGroupsLambda.py","file_name":"rebalancePaGroupsLambda.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"81"} +{"seq_id":"15004077049","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, StringType, IntegerType\n\nif __name__ == '__main__':\n    spark = SparkSession.builder.appName(\"test\").master(\"local[*]\").getOrCreate()\n    sc = spark.sparkContext\n\n    df = spark.read.format(\"csv\").schema(\"id INT,subject STRING, score INT\").load(\"../data/input/sql/stu_score.txt\")\n    # df.show()\n\n    # SQL style\n    df.createTempView(\"score\")\n    df.createOrReplaceTempView(\"score1\")\n    df.createGlobalTempView(\"score2\")\n\n    spark.sql(\"select subject,count(*) as count from score group by subject\").show()\n    spark.sql(\"select subject,count(*) as count from score1 group by subject\").show()\n    spark.sql(\"select subject,count(*) as count from global_temp.score2 group by subject\").show()\n","repo_name":"lbobn/PySpark_learning","sub_path":"02_SQL/05_dataframe_process_sql.py","file_name":"05_dataframe_process_sql.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41012228985","text":"# In Python we can also use inheritance with the built-in types.\r\n\r\n# For example, we can create a class called \"Text()\" and make it a subclass of \"str\".\r\n# With this our text class will inherit all the features from the \"str\" class.\r\nclass Text(str):\r\n    # And we can add more features to it. 
Like duplicate, for example\r\n    def duplicate(self):\r\n        return self + self\r\n\r\n\r\n# This text object will inherit all the features from \"str\"\r\n# like \".lower()\" to convert to lowercase letters,\r\n# but we can also use the new features we created in the \"Text()\" class.\r\ntext = Text(\"Python\")\r\nprint(text.duplicate())\r\n\r\n\r\n# In this example we are extending the built-in Python \"list()\"\r\n# Adding features to the append method.\r\n# Like printing this message every time an object is appended. For logging for example\r\n# And we call the append method from the base class \"list\" with the \"super()\" method.\r\nclass TrackableList(list):\r\n    def append(self, object):\r\n        print(\"Append called\")\r\n        super().append(object)\r\n\r\n\r\nmylist = TrackableList()\r\nmylist.append(\"1\")\r\nprint(mylist)\r\n","repo_name":"shubhranshi/mosh-python","sub_path":"07_Classes/76_extending_built_in_types.py","file_name":"76_extending_built_in_types.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30956134732","text":"import traceback\n\nfrom django.conf import settings\n\nfrom anymail.backends.sendgrid import EmailBackend\nfrom anymail.exceptions import AnymailError\n\nfrom .models import HCOutGoingEmail\nfrom .utils import get_logger\n\n\nclass HCSendgridEmailBackend(EmailBackend):\n    logger = get_logger('HCSendgridEmailBackend')\n\n    def send_messages(self, email_messages):\n        \"\"\"\n        Sends one or more EmailMessage objects and returns the number of email\n        messages sent.\n        \"\"\"\n        # This API is specified by Django's core BaseEmailBackend\n        # (so you can't change it to, e.g., return detailed status).\n        # Subclasses shouldn't need to override.\n\n        num_sent = 0\n        if not email_messages:\n            return num_sent\n\n        created_session = self.open()\n\n        try:\n            for message in email_messages:\n                \n                hc_outgoing_email = HCOutGoingEmail()\n                hc_outgoing_email.subject = message.subject\n                hc_outgoing_email.to = message.recipients()\n                \n                if settings.DEBUG:\n                    self.logger.debug(\"setting dev emails\")\n                    message.to = settings.DEV_EMAILS\n\n                self.logger.debug(str(message.recipients()))\n                \n                if hasattr(message, 'hc_template_name') and getattr(message, 'hc_template_name', None):\n                    hc_outgoing_email.template_name = message.hc_template_name\n                if hasattr(message, 'hc_created_by') and getattr(message, 'hc_created_by', None):\n                    hc_outgoing_email.created_by = message.hc_created_by\n                \n                try:\n                    sent = self._send(message)\n                    if message.anymail_status.message_id:\n                        hc_outgoing_email.anymail_message_id = message.anymail_status.message_id\n                except AnymailError:\n                    self.logger.critical(traceback.format_exc())\n                    hc_outgoing_email.sent = False\n                    hc_outgoing_email.error = str(traceback.format_exc())\n                    \n                    if self.fail_silently:\n                        sent = False\n                    else:\n                        raise\n                finally:\n                    hc_outgoing_email.save()\n                \n                if sent:\n                    num_sent += 1\n        finally:\n            if created_session:\n                self.close()\n\n        return num_sent\n\n","repo_name":"SiriusWhi/HomeCaptain_SAAS","sub_path":"homecaptain/apps/util/sendgrid.py","file_name":"sendgrid.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19879970966","text":"import os\n\nif __name__ == \"__main__\":\n    train_file = \"/mnt/lustre/share/data/images/meta/train.txt\"\n    idx_file = \"data/10percent.txt\"\n    out_file = idx_file + \".ext\"\n    max_class = 1000\n\n    with open(idx_file, \"r\") as fin, 
open(train_file, \"r\") as f_train:\n all_samples = {}\n idx_samples = []\n selected_samples = []\n for line in f_train.readlines():\n name, label = line.strip().split()\n label = int(label)\n if label < max_class:\n base_name = name.split(\"/\")[1]\n all_samples[base_name] = (label, name)\n print(f\"len of all samples: {len(all_samples)}\")\n \n for line in fin.readlines():\n nm = line.strip()\n selected_samples.append(all_samples[nm])\n \n print(f\"Len of selected samples {len(selected_samples)}\")\n\n with open(out_file, \"w\") as fout:\n for (lb, nm) in selected_samples:\n fout.write(f\"{lb} {nm}\\n\")\n","repo_name":"LayneH/LEWEL","sub_path":"data/cvt_idx.py","file_name":"cvt_idx.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"28560746227","text":"from .layout_functions import radial_distance_layout\n\n__version__ = \"0.1.1\"\n__author__ = \"Benjamin F. Maier\"\n__copyright__ = \"Copyright 2014-2022, \" + __author__\n__credits__ = [__author__]\n__license__ = \"MIT\"\n__maintainer__ = __author__\n__email__ = \"contact@benmaier.org\"\n__status__ = \"Development\"\n","repo_name":"benmaier/radial-distance-layout","sub_path":"radial_distance_layout/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"41830847178","text":"# !/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom pybpodapi.com.messaging.base_message import BaseMessage\nfrom pybpodapi.utils import date_parser\n\n\nclass StderrMessage(BaseMessage):\n \"\"\"\n Stderr message from the server process\n\n .. seealso::\n\n :py:class:`pybpodgui_plugin.com.messaging.base_message.BoardMessage`\n\n \"\"\"\n\n MESSAGE_TYPE_ALIAS = \"stderr\"\n MESSAGE_COLOR = (255, 0, 0)\n\n def __init__(self, content, host_timestamp=None):\n super(StderrMessage, self).__init__(str(content), host_timestamp)\n\n def __str__(self):\n return \"host-time:{0} pc-time:{1} {2}\".format(\n self.host_timestamp if self.host_timestamp is not None else \"\",\n self.pc_timestamp.strftime(\"%Y%m%d%H%M%S\") if self.pc_timestamp else \"\",\n self.content,\n )\n\n def tolist(self):\n return [\n self.MESSAGE_TYPE_ALIAS,\n str(self.pc_timestamp),\n self.host_timestamp,\n self.content,\n None,\n None,\n ]\n\n @classmethod\n def fromlist(cls, row):\n \"\"\"\n Returns True if the typestr represents the class\n \"\"\"\n obj = cls(row[3], float(row[2]) if row[2] else None)\n obj.pc_timestamp = date_parser.parse(row[1])\n\n return obj\n","repo_name":"pybpod/pybpod-api","sub_path":"pybpodapi/com/messaging/stderr.py","file_name":"stderr.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37740667294","text":"import math\nimport unittest\n\n\nclass Solution:\n # bisection method 二分法\n def mySqrt(self, x: int) -> int:\n left, right = 0, x\n ans = -1\n\n while left <= right:\n m = (left + right) // 2\n n = m * m\n\n if n <= x:\n ans = m\n left = m + 1\n else:\n right = m - 1\n\n return ans\n\n\nclass Solution2:\n # Newton's method, 牛顿法\n def mySqrt(self, x: int) -> int:\n if x == 0:\n return 0\n\n C, x0 = float(x), float(x)\n\n while True:\n xi = 0.5 * (x0 + C / x0)\n if abs(x0 - xi) < 1e-7:\n break\n x0 = xi\n return int(x0)\n\n\nclass Test(unittest.TestCase):\n def test_bisection(self):\n for x in range(0, 1000):\n 
self.assertEqual(Solution().mySqrt(x), int(math.sqrt(x)))\n\n def test_newton(self):\n for x in range(0, 1000):\n self.assertEqual(Solution2().mySqrt(x), int(math.sqrt(x)))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"win5do/play-go","sub_path":"algorithm/leetcode/69_sqrt_x.py","file_name":"69_sqrt_x.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7631911556","text":"# =====================================================\n# Analysis python script template.\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport sys\nimport numpy as np\nfrom datetime import datetime\n\nwalk = \"Walk\"\nrun = \"Run\"\nnumRuns = 0\nnumEvents = 20\n\n# date\n# now = datetime.now()\n# stringDate = now.strftime(\"%Y-%m\")\n# stringDate = \"2022-01\"\nnow = datetime.now()\nstringDate = now.strftime(\"%Y-%m\")\nprint(\"Date: \", stringDate)\n\n# Glob the files\nfile_list = ['../activities/charlieActivities.csv', '../activities/matthewActivities.csv', '../activities/georgeActivities.csv', '../activities/rhysActivities.csv', \"../activities/finchActivities.csv\", \"../activities/jennyActivities.csv\"]\nnames = [\"charlie\", \"matthew\", \"george\", \"rhys\", \"finch\", \"jenny\"]\nmonths = [\"2022-01\",\"2022-02\",\"2022-03\",\"2022-04\",\"2022-05\",\"2022-06\",\"2022-07\",\"2022-08\",\"2022-09\",\"2022-10\",\"2022-11\"]\n\n# CHARLIE =============================================\n# charlieDistances = charlieActivities[\"distance\"]\n# charlieMovingTime = charlieActivities[\"moving_time\"]\n# charlieTotDistance = 0\n# charlieTotTime = 0\n# charlieRuns = 0\n# charlieDate = charlieActivities[\"start_date_local\"]\n#\n# if len(charlieDistances) != 0:\n# for i in range(len(charlieActivities[\"distance\"])):\n# date = charlieDate[i][0:7]\n# if charlieActivities[\"type\"][i] == run and date == stringDate:\n# charlieRuns += 1\n# charlieTotDistance += charlieDistances[i]\n# charlieTotTime += charlieMovingTime[i]\n#\n# # RHYS =============================================\n# rhysDistances = rhysActivities[\"distance\"]\n# rhysMovingTime = rhysActivities[\"moving_time\"]\n# rhysTotDistance = 0\n# rhysTotTime = 0\n# rhysRuns = 0\n# rhysDate = rhysActivities[\"start_date_local\"]\n#\n# if len(charlieDistances) != 0:\n# for i in range(len(rhysActivities[\"distance\"])):\n# date = rhysDate[i][0:7]\n# if rhysActivities[\"type\"][i] == run and date == stringDate:\n# rhysRuns += 1\n# rhysTotDistance += rhysDistances[i]\n# rhysTotTime += rhysMovingTime[i]\n#\n# # GEORGE =============================================\n# georgeDistances = georgeActivities[\"distance\"]\n# georgeMovingTime = georgeActivities[\"moving_time\"]\n# georgeTotDistance = 0\n# georgeTotTime = 0\n# georgeRuns = 0\n# georgeDate = georgeActivities[\"start_date_local\"]\n#\n# if len(georgeDistances) != 0:\n# for i in range(len(georgeActivities[\"distance\"])):\n# date = georgeDate[i][0:7]\n# if georgeActivities[\"type\"][i] == run and date == stringDate:\n# georgeRuns += 1\n# georgeTotDistance += georgeDistances[i]\n# georgeTotTime += georgeMovingTime[i]\n# # MATTHEW =============================================\n# matthewDistances = matthewActivities[\"distance\"]\n# matthewMovingTime = matthewActivities[\"moving_time\"]\n# matthewTotDistance = 0\n# matthewTotTime = 0\n# matthewRuns = 0\n# matthewDate = matthewActivities[\"start_date_local\"]\n#\n# if len(matthewDistances) != 0:\n# for i in 
range(len(matthewActivities[\"distance\"])):\n#         date = matthewDate[i][0:7]\n#         if matthewActivities[\"type\"][i] == run and date == stringDate:\n#             matthewRuns += 1\n#             matthewTotDistance += matthewDistances[i]\n#             matthewTotTime += matthewMovingTime[i]\n#\n#\n# finchDistances = finchActivities[\"distance\"]\n# finchMovingTime = finchActivities[\"moving_time\"]\n# finchTotDistance = 0\n# finchTotTime = 0\n# finchRuns = 0\n# finchDate = finchActivities[\"start_date_local\"]\n#\n# if len(finchDistances) != 0:\n#     for i in range(len(finchActivities[\"distance\"])):\n#         date = finchDate[i][0:7]\n#         if finchActivities[\"type\"][i] == run and date == stringDate:\n#             finchRuns += 1\n#             finchTotDistance += finchDistances[i]\n#             finchTotTime += finchMovingTime[i]\n\nmonthly_group_totals = [] # whole group total kms for month\nmonthly_indy_totals = [] # to show relative effort among the group\nmonthly_total = 0\n\nfor m in months:\n    monthly_total = 0\n    print(\"Analysing new month: \", m)\n    for i, f in enumerate(file_list):\n\n        # get activities of person\n        acts = pd.read_csv(f, sep=\",\", header=0)\n        distances = acts[\"distance\"]\n        moving_time = acts[\"moving_time\"]\n        totDis = 0\n        totTime = 0\n        date = acts[\"start_date_local\"]\n\n        for a in range(len(acts)):\n            d = date[a][0:7]\n            if acts[\"type\"][a] == run and d == m:\n                monthly_total += distances[a]/1000\n\n    monthly_group_totals.append(monthly_total)\n\nfor i, f in enumerate(file_list):\n    indy_total = 0\n    list = []\n    for m in months:\n        # get activities of person\n        acts = pd.read_csv(f, sep=\",\", header=0)\n        distances = acts[\"distance\"]\n        moving_time = acts[\"moving_time\"]\n        totDis = 0\n        totTime = 0\n        date = acts[\"start_date_local\"]\n\n        for a in range(len(acts)):\n            d = date[a][0:7]\n            if acts[\"type\"][a] == run and d == m:\n                indy_total += distances[a]/1000\n        list.append(indy_total)\n        # print(\"Indy total: \", str(f), \" is \", indy_total)\n        indy_total = 0\n    monthly_indy_totals.append(list)\n\nprint(\"Got \", len(monthly_indy_totals), \" indy total lists.\")\nfor l in monthly_indy_totals:\n    monthly_group_totals.append(l)\n\n\nprint(\"Length of whole list: \", len(monthly_group_totals))\n\nplt.xlabel(\"Month of 2022\")\nplt.ylabel(\"Total Distance (km)\")\n\n# plt.show()\n\n\n\n\n","repo_name":"CharlieBatchelor/NugRunClub","sub_path":"analysisGraphs/groupEffortByMonth.py","file_name":"groupEffortByMonth.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2958284035","text":"def find_message(text):\n    message = \"\"\n    for c in text:\n        if c.isupper():\n            message += c\n    return message\n\n'''filter'''\nfind_message_vol2 = lambda text: ''.join(filter(str.isupper, text))\n\n'''not lambda'''\n# this does not use lambda; maybe clearer than the lambda version\ndef find_message_vol3(text):\n    return ''.join(c for c in text if c.isupper())\n#find_message_1 = lambda text:(c + c) for c in text if c.isupper()\n\nif __name__ == '__main__':\n    # These \"asserts\" are used only for self-checking and are not necessary for auto-testing\n    # assert find_message(\"How are you? Eh, ok. Low or Lower? Ohhh.\") == \"HELLO\", \"hello\"\n    # assert find_message(\"hello world!\") == \"\", \"Nothing\"\n    # assert find_message(\"HELLO WORLD!!!\") == \"HELLOWORLD\", \"Capitals\"\n    print(find_message(\"How are you? Eh, ok. Low or Lower? Ohhh.\"))\n    print(find_message_vol2(\"How are you? Eh, ok. Low or Lower? Ohhh.\"))\n    print(find_message_vol3(\"How are you? Eh, ok. Low or Lower? 
Ohhh.\"))","repo_name":"AlexRITIAN/CheckIO_Answer","sub_path":"ELEMENTARY/Secret_Message.py","file_name":"Secret_Message.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24515847110","text":"from flask import Flask, request, jsonify, make_response\nimport requests\napp = Flask(__name__)\n\n# @app.route('/', methods =['GET'])\n# def respond():\n# return jsonify({\"reply\":\"What's Up\"})\n\n# @app.route('/', methods =['POST'])\n# def verify():\n# data = request.data\n# return data\n\nemojis = {\n \"pos\": \":)\",\n \"neutral\": \":|\",\n \"neg\": \":(\"\n}\n\ndef fetch_results(query, lang):\n available_languages = [\"english\", \"french\", \"dutch\"]\n\n if lang.lower() not in available_languages:\n return \"Sorry, \" + lang + \"not in the available languages.\"\n else:\n lang = lang.lower()\n \n url = 'http://text-processing.com/api/sentiment/'\n payload = {'text': query, 'lang': lang}\n response = requests.post(url, data = payload)\n\n if response.status_code == 200:\n json_response = response.json()\n label = json_response[\"label\"]\n probability = json_response[\"probability\"][label]\n emoji = emojis[label]\n\n result = query + \" is \" + label + \" \" + emoji + \" : \" + str(int(probability*100)) + \"%\"\n \n else:\n result = \"request overlimit\"\n\n return result\n\ndef results(request):\n action = request.get(\"queryResult\")\n params = action['parameters']\n\n language = params['language']\n query = params['text']\n\n results = fetch_results(query = query, lang = language)\n\n response = {'fulfillmentText' : results}\n return response\n\n\n@app.route('/webhook', methods = ['GET', 'POST'])\ndef webhook():\n res = results(request.get_json(force = True))\n print(jsonify(res))\n return make_response(jsonify(res))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"bhagwatmugdha/chatbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17304473508","text":"import requests\nimport time\nfrom parsel import Selector\nfrom tech_news.database import create_news\n\n\n# Requisito 1\ndef fetch(url, timeout=1):\n time.sleep(1)\n try:\n response = requests.get(url, timeout=timeout)\n if response.status_code == 200:\n return response.text\n except (requests.HTTPError, requests.ReadTimeout):\n return None\n\n\n# Requisito 2\ndef scrape_novidades(html_content):\n selector = Selector(text=html_content)\n url = selector.css(\".tec--card .tec--card__info h3 a::attr(href)\").getall()\n return url\n\n\n# Requisito 3\ndef scrape_next_page_link(html_content):\n selector = Selector(text=html_content)\n next = selector.css(\"div.tec--list a.tec--btn::attr(href)\").get()\n if next:\n return next\n else:\n return None\n\n\n# Requisito 4\ndef scrape_noticia(html_content):\n news = {\n 'url': '',\n 'title': '',\n 'timestamp': '',\n 'writer': None,\n 'shares_count': '',\n 'comments_count': '',\n 'summary': '',\n 'sources': [],\n 'categories': []\n }\n\n selector = Selector(text=html_content)\n news[\"url\"] = selector.css(\n \"head link[rel='canonical']::attr(href)\"\n ).get()\n\n news['title'] = selector.css(\n \"main h1#js-article-title::text\"\n ).get()\n\n news['timestamp'] = selector.css(\n \"div.tec--timestamp time::attr(datetime)\"\n ).get()\n\n writer = selector.css(\n \".z--font-bold *::text\"\n ).get()\n\n if(writer is not None and 
writer != ''):\n        formated_writer = writer\n        if writer[0] == \" \" and writer[-1] == \" \":\n            formated_writer = writer[1:-1]\n        news['writer'] = formated_writer\n\n    shares = selector.css(\n        \"nav.tec--toolbar div.tec--toolbar__item::text\"\n    ).get()\n    if(shares):\n        news['shares_count'] = int(shares.strip(\"Compartilharam\"))\n    else:\n        news['shares_count'] = 0\n\n    comments = selector.css(\n        \"#js-comments-btn::attr(data-count)\"\n    ).get()\n    if(comments):\n        news['comments_count'] = int(comments.strip())\n    else:\n        news['comments_count'] = 0\n\n    news['summary'] = \"\".join(selector.css(\n        \"div.tec--article__body > p:first-child *::text\"\n    ).getall())\n\n    news['sources'] = [\n        source.strip()\n        for source\n        in selector.css(\n            \"div.z--mb-16 div a::text\"\n        ).getall()\n    ]\n    news['categories'] = [\n        category.strip()\n        for category\n        in selector.css(\n            \"#js-categories a::text\"\n        ).getall()\n    ]\n    return news\n\n\n# Requirement 5\ndef get_tech_news(amount):\n    base_url = \"https://www.tecmundo.com.br/novidades\"\n    html_content = fetch(base_url)\n    news_link_list = scrape_novidades(html_content)\n    news_list = []\n\n    while len(news_link_list) < amount:\n        html_content = fetch(scrape_next_page_link(html_content))\n        news_link_list.extend(scrape_novidades(html_content))\n\n    for link in news_link_list[:amount]:\n        content_news = fetch(link)\n        created_scrap = scrape_noticia(content_news)\n        news_list.append(created_scrap)\n\n    create_news(news_list)\n    return news_list\n","repo_name":"thisouzadev/ciencia-da-computacao-tech-news","sub_path":"tech_news/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9470787489","text":"import html\nfrom typing import Optional, List\n\nfrom telegram import Message, Chat, Update, Bot, User\nfrom telegram import ParseMode\nfrom telegram.error import BadRequest\nfrom telegram.ext import CommandHandler, Filters\nfrom telegram.ext.dispatcher import run_async\nfrom telegram.utils.helpers import escape_markdown, mention_html\n\nfrom haruka import dispatcher, updater\nfrom haruka.modules.disable import DisableAbleCommandHandler\nfrom haruka.modules.helper_funcs.chat_status import bot_admin, can_promote, user_admin, can_pin\nfrom haruka.modules.helper_funcs.extraction import extract_user\nfrom haruka.modules.log_channel import loggable\nfrom haruka.modules.sql import admin_sql as sql\nfrom haruka.modules.translations.strings import tld\n\nfrom haruka.modules.connection import connected\n\n@run_async\n@bot_admin\n@user_admin\n@loggable\ndef promote(bot: Bot, update: Update, args: List[str]) -> str:\n    message = update.effective_message  # type: Optional[Message]\n    user = update.effective_user  # type: Optional[User]\n    chat = update.effective_chat  # type: Optional[Chat]\n    conn = connected(bot, update, chat, user.id)\n    if not conn == False:\n        chatD = dispatcher.bot.getChat(conn)\n    else:\n        chatD = update.effective_chat\n    if chat.type == \"private\":\n        exit(1)\n\n    if not chatD.get_member(bot.id).can_promote_members:\n        update.effective_message.reply_text(\"I can't promote/demote people here! 
\"\n                                            \"Make me admin so I can appoint new admins.\")\n        exit(1)\n\n    user_id = extract_user(message, args)\n    if not user_id:\n        message.reply_text(tld(chat.id, \"Tag the person, don't make this confusing!\"))\n        return \"\"\n\n    user_member = chatD.get_member(user_id)\n    if user_member.status == 'administrator' or user_member.status == 'creator':\n        message.reply_text(tld(chat.id, \"They're already an admin, why promote them again?\"))\n        return \"\"\n\n    if user_id == bot.id:\n        message.reply_text(tld(chat.id, \"I can't promote myself, make me admin, don't be stingy!\"))\n        return \"\"\n\n    # set same perms as bot - bot can't assign higher perms than itself!\n    bot_member = chatD.get_member(bot.id)\n\n    bot.promoteChatMember(chatD.id, user_id,\n                          can_change_info=bot_member.can_change_info,\n                          can_post_messages=bot_member.can_post_messages,\n                          can_edit_messages=bot_member.can_edit_messages,\n                          can_delete_messages=bot_member.can_delete_messages,\n                          #can_invite_users=bot_member.can_invite_users,\n                          can_restrict_members=bot_member.can_restrict_members,\n                          can_pin_messages=bot_member.can_pin_messages,\n                          can_promote_members=bot_member.can_promote_members)\n\n    message.reply_text(tld(chat.id, f\"Successfully promoted to admin in *{chatD.title}*!\"), parse_mode=ParseMode.MARKDOWN)\n    return f\"{html.escape(chatD.title)}:\" \\\n           \"\\n#PROMOTED\" \\\n           f\"\\nAdmin: {mention_html(user.id, user.first_name)}\" \\\n           f\"\\nUser: {mention_html(user_member.user.id, user_member.user.first_name)}\"\n\n\n@run_async\n@bot_admin\n@user_admin\n@loggable\ndef demote(bot: Bot, update: Update, args: List[str]) -> str:\n    chat = update.effective_chat  # type: Optional[Chat]\n    message = update.effective_message  # type: Optional[Message]\n    user = update.effective_user  # type: Optional[User]\n    conn = connected(bot, update, chat, user.id)\n    if not conn == False:\n        chatD = dispatcher.bot.getChat(conn)\n    else:\n        chatD = update.effective_chat\n    if chat.type == \"private\":\n        exit(1)\n\n    if not chatD.get_member(bot.id).can_promote_members:\n        update.effective_message.reply_text(\"I can't promote/demote people here! 
\"\n                                            \"Make me admin so I can appoint new admins.\")\n        exit(1)\n\n    user_id = extract_user(message, args)\n    if not user_id:\n        message.reply_text(tld(chat.id, \"Don't make this confusing, tag the person!\"))\n        return \"\"\n\n    user_member = chatD.get_member(user_id)\n    if user_member.status == 'creator':\n        message.reply_text(tld(chat.id, \"Sorry, I can't demote the owner or co-owner\"))\n        return \"\"\n\n    if not user_member.status == 'administrator':\n        message.reply_text(tld(chat.id, \"Can't demote someone if you weren't the one who promoted them!\"))\n        return \"\"\n\n    if user_id == bot.id:\n        message.reply_text(tld(chat.id, \"I can't demote myself!\"))\n        return \"\"\n\n    try:\n        bot.promoteChatMember(int(chatD.id), int(user_id),\n                              can_change_info=False,\n                              can_post_messages=False,\n                              can_edit_messages=False,\n                              can_delete_messages=False,\n                              can_invite_users=False,\n                              can_restrict_members=False,\n                              can_pin_messages=False,\n                              can_promote_members=False)\n        message.reply_text(tld(chat.id, f\"Successfully demoted to member in *{chatD.title}*!\"), parse_mode=ParseMode.MARKDOWN)\n        return f\"{html.escape(chatD.title)}:\" \\\n               \"\\n#DEMOTED\" \\\n               f\"\\nAdmin: {mention_html(user.id, user.first_name)}\" \\\n               f\"\\nUser: {mention_html(user_member.user.id, user_member.user.first_name)}\"\n\n    except BadRequest:\n        message.reply_text(\n            tld(chat.id, \"Can't demote, or maybe I'm not an admin!\")\n        )\n        return \"\"\n\n\n@run_async\n@bot_admin\n@can_pin\n@user_admin\n@loggable\ndef pin(bot: Bot, update: Update, args: List[str]) -> str:\n    user = update.effective_user  # type: Optional[User]\n    chat = update.effective_chat  # type: Optional[Chat]\n\n    is_group = chat.type != \"private\" and chat.type != \"channel\"\n\n    prev_message = update.effective_message.reply_to_message\n\n    is_silent = True\n    if len(args) >= 1:\n        is_silent = not (args[0].lower() == 'notify' or args[0].lower() == 'loud' or args[0].lower() == 'violent')\n\n    if prev_message and is_group:\n        try:\n            bot.pinChatMessage(chat.id, prev_message.message_id, disable_notification=is_silent)\n        except BadRequest as excp:\n            if excp.message == \"Chat_not_modified\":\n                pass\n            else:\n                raise\n        return f\"{html.escape(chat.title)}:\" \\\n               \"\\n#PINNED\" \\\n               f\"\\nAdmin: {mention_html(user.id, user.first_name)}\"\n\n    return \"\"\n\n\n@run_async\n@bot_admin\n@can_pin\n@user_admin\n@loggable\ndef unpin(bot: Bot, update: Update) -> str:\n    chat = update.effective_chat\n    user = update.effective_user  # type: Optional[User]\n\n    try:\n        bot.unpinChatMessage(chat.id)\n    except BadRequest as excp:\n        if excp.message == \"Chat_not_modified\":\n            pass\n        else:\n            raise\n\n    return f\"{html.escape(chat.title)}:\" \\\n           \"\\n#UNPINNED\" \\\n           f\"\\nAdmin: {mention_html(user.id, user.first_name)}\"\n\n\n@run_async\n@bot_admin\n@user_admin\ndef invite(bot: Bot, update: Update):\n    chat = update.effective_chat  # type: Optional[Chat]\n    user = update.effective_user  # type: Optional[User]\n    conn = connected(bot, update, chat, user.id, need_admin=False)\n    if not conn == False:\n        chatP = dispatcher.bot.getChat(conn)\n    else:\n        chatP = update.effective_chat\n    if chat.type == \"private\":\n        exit(1)\n\n    if chatP.username:\n        update.effective_message.reply_text(chatP.username)\n    elif chatP.type == chatP.SUPERGROUP or chatP.type == chatP.CHANNEL:\n        bot_member = chatP.get_member(bot.id)\n        if bot_member.can_invite_users:\n            invitelink = chatP.invite_link\n            #print(invitelink)\n            if not invitelink:\n                invitelink = bot.exportChatInviteLink(chatP.id)\n\n            
update.effective_message.reply_text(invitelink)\n        else:\n            update.effective_message.reply_text(tld(chat.id, \"I can't access the invite link!\"))\n    else:\n        update.effective_message.reply_text(tld(chat.id, \"I can only give you the link if the group is set to public, sorry!\"))\n\n\n@run_async\ndef adminlist(bot, update):\n    chat = update.effective_chat  # type: Optional[Chat]\n    user = update.effective_user  # type: Optional[User]\n    conn = connected(bot, update, chat, user.id, need_admin=False)\n    if not conn == False:\n        chatP = dispatcher.bot.getChat(conn)\n    else:\n        chatP = update.effective_chat\n    if chat.type == \"private\":\n        exit(1)\n    \n    administrators = chatP.get_administrators()\n\n    text = tld(chat.id, \"Admins in\") + \" *{}*:\".format(chatP.title or tld(chat.id, \"this chat\"))\n    for admin in administrators:\n        user = admin.user\n        status = admin.status\n        if status == \"creator\":\n            name = user.first_name + (user.last_name or \"\") + tld(chat.id, \" (Creator)\")\n        else:\n            name = user.first_name + (user.last_name or \"\")\n        text += f\"\\n• `{name}`\"\n\n    update.effective_message.reply_text(text, parse_mode=ParseMode.MARKDOWN)\n\n\n@user_admin\n@run_async\ndef reaction(bot: Bot, update: Update, args: List[str]) -> str:\n    chat = update.effective_chat  # type: Optional[Chat]\n    if len(args) >= 1:\n        var = args[0]\n        print(var)\n        if var == \"False\":\n            sql.set_command_reaction(chat.id, False)\n            update.effective_message.reply_text(\"Reactions to admin commands are now disabled for users\")\n        elif var == \"True\":\n            sql.set_command_reaction(chat.id, True)\n            update.effective_message.reply_text(\"Reactions to admin commands are now enabled for users\")\n        else:\n            update.effective_message.reply_text(\"Please enter True or False!\", parse_mode=ParseMode.MARKDOWN)\n    else:\n        status = sql.command_reaction(chat.id)\n        if status == False:\n            update.effective_message.reply_text(\"Reactions to admin commands for users are currently `disabled`!\", parse_mode=ParseMode.MARKDOWN)\n        else:\n            update.effective_message.reply_text(\"Reactions to admin commands for users are currently `enabled`!\", parse_mode=ParseMode.MARKDOWN)\n\n\n@run_async\n@user_admin\ndef refresh_admin(update, _):\n    try:\n        ADMIN_CACHE.pop(update.effective_chat.id)\n    except KeyError:\n        pass\n\n    update.effective_message.reply_text(\"⚡️ 𝙚𝙯𝙗𝙬 𝙞𝙨 𝙖𝙘𝙩𝙞𝙫𝙚 ⚡️\")\n    \n\n__help__ = \"\"\"\n - /adminlist | /admins: list of admins in the chat\n\n*Admin only:*\n - /pin: silently pins the message replied to - add 'loud' or 'notify' to give notifs to users.\n - /unpin: unpins the currently pinned message\n - /invitelink: gets invitelink\n - /promote: promotes the user replied to\n - /demote: demotes the user replied to\n\"\"\"\n\n__mod_name__ = \"Admin\"\n\nPIN_HANDLER = DisableAbleCommandHandler(\"pin\", pin, pass_args=True, filters=Filters.group)\nUNPIN_HANDLER = DisableAbleCommandHandler(\"unpin\", unpin, filters=Filters.group)\n\nINVITE_HANDLER = CommandHandler(\"invitelink\", invite)\n\nPROMOTE_HANDLER = DisableAbleCommandHandler(\"promote\", promote, pass_args=True)\nDEMOTE_HANDLER = DisableAbleCommandHandler(\"demote\", demote, pass_args=True)\n\nREACT_HANDLER = DisableAbleCommandHandler(\"reaction\", reaction, pass_args=True, filters=Filters.group)\nADMIN_REFRESH_HANDLER = CommandHandler(\"reload\", refresh_admin, filters=Filters.group)\n\nADMINLIST_HANDLER = DisableAbleCommandHandler([\"adminlist\", \"admins\"], 
adminlist)\n\ndispatcher.add_handler(PIN_HANDLER)\ndispatcher.add_handler(UNPIN_HANDLER)\ndispatcher.add_handler(INVITE_HANDLER)\ndispatcher.add_handler(PROMOTE_HANDLER)\ndispatcher.add_handler(DEMOTE_HANDLER)\ndispatcher.add_handler(ADMINLIST_HANDLER)\ndispatcher.add_handler(REACT_HANDLER)\ndispatcher.add_handler(ADMIN_REFRESH_HANDLER)\n","repo_name":"ez69bw/ezbw-Group","sub_path":"haruka/modules/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":12068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70912026506","text":"def p(x):\n if x==1:\n return 0\n for i in range(2,int(x**0.5)+1):\n if x%i==0:\n return 0\n else:\n return 1\ndef np(x):\n while p(x)==0:\n x=x+1\n return x\ndef pp(x):\n while p(x)==0:\n x=x-1\n return x\nfor i in range(int(input())):\n x=int(input())\n a=np(x)\n b=pp(x)\n if x-b<=a-x:\n print(b)\n else:\n print(a)","repo_name":"Nagendra180/codemind-python","sub_path":"Nearest_Prime.py","file_name":"Nearest_Prime.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6039356237","text":"import numpy as np\nimport pandas as pd\n\nfrom update_project.general.results_io import ResultsIO\n\n\ndef get_lfp(nwbfile):\n electrode_df = nwbfile.electrodes.to_dataframe()\n ripple_channel = electrode_df.index[electrode_df['ripple_channel'] == 1][0]\n pfc_channel = electrode_df.query('location == \"PFC\"').index[-1] # just take last pfc channel\n\n lfp_ca1 = nwbfile.processing['ecephys']['LFP']['LFP'].data[:, ripple_channel]\n lfp_pfc = nwbfile.processing['ecephys']['LFP']['LFP'].data[:, pfc_channel]\n rate = nwbfile.processing['ecephys']['LFP']['LFP'].rate\n timestamps = np.arange(0, len(lfp_ca1) / rate, 1 / rate)\n\n lfp_dict = dict(CA1=pd.Series(index=timestamps[:], data=lfp_ca1),\n PFC=pd.Series(index=timestamps[:], data=lfp_pfc))\n\n return pd.DataFrame.from_dict(lfp_dict)\n\n\ndef get_theta(nwbfile, adjust_reference=False, session_id=''):\n electrode_df = nwbfile.electrodes.to_dataframe()\n ripple_channel = electrode_df.index[electrode_df['ripple_channel'] == 1][0]\n\n band_df = nwbfile.processing['ecephys']['decomposition_amplitude'].bands.to_dataframe()\n band_ind = np.array(band_df.index[band_df['band_name'] == 'theta'])[0]\n amp = nwbfile.processing['ecephys']['decomposition_amplitude'].data[:, ripple_channel, band_ind]\n phase = nwbfile.processing['ecephys']['decomposition_phase'].data[:, ripple_channel, band_ind]\n rate = nwbfile.processing['ecephys']['decomposition_amplitude'].rate\n timestamps = np.arange(0, len(amp) / rate, 1 / rate)\n\n if adjust_reference:\n phase = adjust_theta_reference(phase, session_id)\n\n theta_dict = dict(amplitude=pd.Series(index=timestamps[:], data=amp),\n phase=pd.Series(index=timestamps[:], data=phase))\n\n return pd.DataFrame.from_dict(theta_dict)\n\n\ndef adjust_theta_reference(phase, session_id):\n # load phase reference data\n results_io = ResultsIO(creator_file=__file__, session_id=session_id, folder_name='phase-reference')\n fname = results_io.get_data_filename(filename='theta_phase_ref_adjustment', results_type='session', format='pkl')\n import_data = results_io.load_pickled_data(fname)\n theta_hist_df = [d for d in import_data][0]\n\n # get amount to adjust\n # phase_mins = theta_hist_df['phase_interval'].apply(lambda x: x.mid).to_numpy()\n phase_mins = np.linspace(-np.pi, np.pi, 12)\n phase_peak = phase_mins[theta_hist_df['phase_adj'].to_numpy()[0]]\n 
new_ref = phase_mins[int(len(phase_mins)/2 - 1)]\n\n    # adjust phase\n    phase_adjusted = phase.copy()\n    amount_to_shift = new_ref - phase_peak #-np.pi - phase_peak\n    phase_adjusted = phase_adjusted + amount_to_shift\n    vals_left = phase_adjusted < -np.pi\n    vals_right = phase_adjusted > np.pi\n    phase_adjusted[vals_left] = phase_adjusted[vals_left] + np.pi * 2\n    phase_adjusted[vals_right] = phase_adjusted[vals_right] - np.pi * 2\n\n    assert (np.unique(np.histogram(phase, phase_mins)[0]) == np.unique(np.histogram(phase_adjusted, phase_mins)[0])).all()\n\n    return phase_adjusted\n","repo_name":"stephprince/update-project","sub_path":"update_project/general/lfp.py","file_name":"lfp.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21450672725","text":"\"\"\"\n    Improve challenge 061 by asking the user if they want to show a few more terms. The program ends when they say they want to show 0 terms.\n\"\"\"\nprint('Arithmetic Progression v3.0')\nprimeiro = int(input('Enter the first term: '))\nrazao = int(input('Enter the common difference: '))\ntermo = primeiro\ncont = 1\ntotal = 0\nmais = 10\nwhile (mais != 0):\n    total += mais\n    while (cont <= total):\n        print(f'{termo}', end=' - ')\n        termo += razao\n        cont += 1\n    print('pause')\n    mais = int(input('How many more terms do you want to show: '))\nprint(f'Progression finished with {total} terms shown')\n","repo_name":"otonielnn/Python_CursoEmVideo","sub_path":"Desafios/ex062.py","file_name":"ex062.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34589133152","text":"font_copyright = \"Copyright 2020 Frank D. 
Martinez M.\"\nfont_name = \"my-icons\"\nfont_family = \"my-icons\"\nfont_start_code = 0xe000\n\n# Directories from where to import svg icons\n#\n# [(, ), ...]\n#\nsources = [\n ('bs-', 'bootstrap-icons'),\n ('my-', 'my-icons'),\n]\n\n# Selection of icons to be included in the font\n#\n# [(, ), ...]\n#\nselect = [\n ('null', 'null'), # Always include a default glyph (null)\n ('bs-info-circle', 'infoCircle'),\n ('bs-file-earmark', 'fileEarmark'),\n ('bs-folder2-open', 'folderOpen'),\n ('bs-hdd', 'save'),\n ('bs-file-earmark-arrow-up', 'fileImport'),\n ('bs-file-earmark-arrow-down', 'fileExport'),\n ('bs-folder', 'folder'),\n ('bs-sliders', 'sliders'),\n ('bs-eye', 'eye'),\n ('bs-layers', 'layers'),\n ('my-example', 'geom'),\n]\n\n# Generate qml component\ngen_qml = True\n\n# Generate html demo\ngen_html = True\n\n# Generate C++ Header file\ngen_cpp = True\ngen_cpp_header_file = \"my-cpp-header-file.hpp\" # [Optional] defaults to {font_name}.h\ngen_cpp_namespace = \"MyIcons\" # [Optional] defaults to Icon \ngen_cpp_constexpr = True # [Optional] defaults to False\n\n# Generate ImGui Icon Font c++ files\ngen_imgui = True\ngen_imgui_file = \"icons_lib.hpp\" # [Optional] defaults to {font_name}_lib.hpp\n","repo_name":"mnesarco/ff-batch","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"3242391081","text":"import random\nimport math\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport itertools\n\nfrom sklearn.datasets import load_iris\niris = load_iris()\ndata = iris.data\nimport time\n\ndef wss(data, clusters, centroids, k): \n wss = 0\n\n for l in range(k): \n data_l = data[clusters == l]\n for i in data_l: \n wss += np.sum((centroids[l] - i) ** 2)\n\n return wss\n\ndef iris_visu(data, clusters): \n X = [0, 0, 0, 1, 1, 2]\n Y = [1, 2, 3, 2, 3, 3]\n\n plt.figure(figsize=(12, 8))\n for i, x, y in zip(range(6), X, Y): \n plt.subplot(2, 3, i+1)\n for cluster, c in zip(range(3), 'rgb'): \n plt.scatter(\n data[clusters==cluster][:, x], \n data[clusters==cluster][:, y], \n c=c\n )\n plt.xlabel(iris.feature_names[x])\n plt.ylabel(iris.feature_names[y])\n plt.autoscale()\n plt.grid()\n plt.show()\n\n# Hartiganのアルゴリズム\ndef hartigan_missing(data, n_clusters, rs_for_initial_values=0):\n np.random.seed(rs_for_initial_values)\n \n #ランダムにク���スタを割り当てる\n clusters = np.random.randint(0, n_clusters, data.shape[0])\n\n #nc(クラスタcに属する値の数)\n def nc(cluster) : \n #nc = len(data[clusters==cluster])\n nc = data[clusters==cluster].shape[0]\n return nc\n\n #ncj(クラスタcに属する, j列の欠損していない値の数)\n def ncj(cluster, j) : \n data_cj = data[clusters==cluster][:, j]\n complete_data_cj = ~np.isnan(data_cj)\n ncj = np.count_nonzero(complete_data_cj)\n return ncj\n\n #x_bar cj \n def xbar_cj(cluster, j) : \n sum_xij = 0\n for x in data[clusters==cluster][:, j] : \n if math.isnan(x) : #xが欠損値nanの場合にTrueを返す\n #sum_xij += 0\n pass\n else : \n sum_xij += x\n return sum_xij / ncj(cluster, j)\n \n #判別式の右辺のシグマ以降を計算する関数\n def right_sigma(q, i, frm = 0, to = data.shape[1]):\n result = 0;\n for j in range(frm, to):\n if data[i, j] == np.nan : \n #result += 0\n pass\n else : \n result += (xbar_cj(q, j) - data[i, j]) ** 2\n return result\n\n #判別式の左辺のシグマ以降を計算する関数\n def left_sigma(p, i, frm = 0, to = data.shape[1]):\n result = 0\n for j in range(frm, to):\n if data[i, j] == np.nan : \n #result += 0\n pass\n else : \n result += (ncj(p, j) ** 2 / ((ncj(p, j) - 1) ** 2)) * 
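Every icon in `select` (apart from the reserved `null` glyph) must resolve through one of the `sources` prefixes, so a generator script could fail fast with a check along these lines; `validate_selection` is a hypothetical helper, not part of the original config:

```python
def validate_selection(sources, select):
    # Each selected icon name must begin with a known source prefix.
    prefixes = tuple(prefix for prefix, _ in sources)
    for icon, alias in select:
        if icon != 'null' and not icon.startswith(prefixes):
            raise ValueError(f"{icon!r} (alias {alias!r}) matches no source prefix")
```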
((xbar_cj(p, j) - data[i, j]) ** 2)\n return result\n \n #アルゴリズムの実行\n #個体が一巡する間に入れ替えが起こらなければ終了なので、入れ替えが起こらないときに、カウントする\n cnt = 0\n while cnt < data.shape[0] : \n #判別式の実行\n for i, x in enumerate(data) : \n #リストを初期化\n right_list = []\n q_list = []\n #pをxの属するクラスタにする\n p = clusters[i]\n for q in range(n_clusters) : \n #pの属さないクラスタに対し、計算を行う\n if p != q : \n left = ((nc(p) - 1) / nc(p)) * left_sigma(p, i)\n right = (nc(q) / (nc(q) + 1)) * right_sigma(q, i)\n if left > right : \n #判別式を満たすものをリストに追加する\n right_list.append(right)\n q_list.append(q)\n if len(right_list) != 0 : \n #リストが空でなければ、リストの最小値のクラスタを割り当てる\n clusters[i] = q_list[right_list.index(min(right_list))]\n #クラスタが変更されたため、カウントを0にする\n cnt = 0\n else : \n #リストが空ならば、クラスタは変更されない。したがって、カウントする\n cnt += 1\n centroids = []\n for j in range(n_clusters): \n centroids.append(data[clusters == j].mean(axis=0))\n return clusters, centroids\n\nif __name__ == '__main__': \n # Hartigan\n start = time.time()\n clusters, centroids = hartigan_missing(data, 3)\n end = time.time()\n print(\"実行にかかった時間は{}\".format(end - start))\n\n # wssを表示\n w = wss(data, clusters, centroids, 3)\n print(\"Hartiganのwss(クラスター内分散の和)は{}です。\".format(w))\n\n # 結果の可視化\n iris_visu(data, clusters)","repo_name":"risingjiro/clustering","sub_path":"hartigan.py","file_name":"hartigan.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35343374090","text":"import time\nfrom lib.constants import ACCEL_SPEED, Direction\nfrom lib.Vector import Vector\n\nclass Rectangle(object):\n def __init__(self, pos, size):\n self.pos = pos\n self.size = size\n self.creation_time = time.time()\n\n def collides(self, other):\n return ((\n self.pos.x <= other.pos.x + other.size.x\n ) and (\n other.pos.x <= self.pos.x + self.size.x\n )) and ((\n self.pos.y <= other.pos.y + other.size.y\n ) and (\n other.pos.y <= self.pos.y + self.size.y\n ))\n\n def get_distance(self, other):\n return self.pos.get_distance(other.pos)\n\n def get_distance_components(self, other):\n return self.pos.get_distance_components(other.pos)\n\nclass AccelRectangle(Rectangle):\n def __init__(self, pos, size):\n super().__init__(pos, size)\n self.velocity = Vector(0)\n\n def accelerate(self, direction):\n if direction == Direction.UP:\n self.velocity.y -= ACCEL_SPEED\n elif direction == Direction.DOWN:\n self.velocity.y += ACCEL_SPEED\n elif direction == Direction.LEFT:\n self.velocity.x -= ACCEL_SPEED\n elif direction == Direction.RIGHT:\n self.velocity.x += ACCEL_SPEED\n\n def check_bounds(self, bounds):\n if self.pos.x < 0:\n self.pos.x = 0\n if self.velocity.x < 0:\n self.velocity.x = 0\n elif self.pos.x > bounds.x - self.size.x:\n self.pos.x = bounds.x - self.size.x\n if self.velocity.x > 0:\n self.velocity.x = 0\n if self.pos.y < 0:\n self.pos.y = 0\n if self.velocity.y < 0:\n self.velocity.y = 0\n elif self.pos.y > bounds.y - self.size.y:\n self.pos.y = bounds.y - self.size.y\n if self.velocity.y > 0:\n self.velocity.y = 0\n\n def decelerate(self):\n self.velocity.slow(ACCEL_SPEED / 2)\n\n def move(self):\n self.pos.x += self.velocity.x\n self.pos.y += self.velocity.y\n","repo_name":"aldahick/dotter.py","sub_path":"lib/Rectangle.py","file_name":"Rectangle.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23006931875","text":"from data import data\nimport re\n\nr = re.compile(\"(\\w+) ([+-]\\d+)\")\n\ndef parse_command(line):\n m 
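One caveat in the sigma helpers above: `data[i, j] == np.nan` is always False in NumPy (NaN never compares equal to itself), so those branches never fire and only the `math.isnan` path in `xbar_cj` actually catches missing values; `np.isnan` is the reliable test. A vectorized sketch of the two sides of the transfer criterion using that mask, in the complete-data form without the per-column `ncj` reweighting:

```python
import numpy as np

def transfer_costs(x, centroid_p, centroid_q, n_p, n_q):
    # Squared distance over the observed (non-NaN) features only.
    mask = ~np.isnan(x)
    d2_p = np.sum((centroid_p[mask] - x[mask]) ** 2)
    d2_q = np.sum((centroid_q[mask] - x[mask]) ** 2)
    left = (n_p - 1) / n_p * d2_p    # cost removed by leaving cluster p
    right = n_q / (n_q + 1) * d2_q   # cost added by joining cluster q
    return left, right               # move x when left > right
```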
= r.findall(line)\n cmd, n = m[0]\n return cmd, int(n)\n\ndef find_infinite_loop(commands):\n has_executed = [False] * len(commands)\n\n acc = 0\n i = 0\n while i < len(commands):\n if has_executed[i]:\n return (i, acc)\n\n has_executed[i] = True\n\n c_i, n_i = commands[i]\n \n if c_i == 'acc':\n acc += n_i\n i += 1\n elif c_i == 'jmp':\n i += n_i\n elif c_i == 'nop':\n i += 1\n else:\n raise Exception(\"invalid command\")\n \n return (i, acc)\n\ndef replace_command(commands):\n n = len(commands)\n mirror = {'jmp': 'nop', 'nop': 'jmp'}\n\n for i in range(n): \n c_i, n_i = commands[i]\n if c_i != 'acc':\n commands_copy = commands.copy()\n commands_copy[i] = (mirror[c_i], n_i)\n \n res = find_infinite_loop(commands_copy)\n\n if res[0] == n: # program terminates, success\n return res\n\n return (0, 0) # failure\n\n# load data\nlines = data.split('\\n')\ncommands = list(map(parse_command, lines))\n\n# part 1\n_, s1 = find_infinite_loop(commands)\nprint(s1)\n\n# part 2\nidx, s2 = replace_command(commands)\nprint(s2)","repo_name":"viktornilssoninfotiv/advent","sub_path":"Jonte/day08/debug_boot_script.py","file_name":"debug_boot_script.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6467386179","text":"# 20. Altere o programa de cálculo do fatorial, permitindo ao usuário calcular o fatorial várias vezes e limitando o fatorial a números inteiros positivos e menores que 16\n\nwhile True:\n fatorial = 1\n try:\n num = int(input('Número: '))\n except ValueError:\n break\n if num > 16 or num < 0:\n break\n else:\n for i in range(1, num + 1):\n fatorial *= i\n\n print(fatorial)\n","repo_name":"ttund21/LearningPython","sub_path":"PythonBrasilExercicios/EstruturaDeRepeticao/Respostas/20_Questao.py","file_name":"20_Questao.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6427603939","text":"import torch\nfrom torch import nn\n\nfrom pytorch_widedeep.wdtypes import Tensor\nfrom pytorch_widedeep.bayesian_models import bayesian_nn as bnn\nfrom pytorch_widedeep.bayesian_models._base_bayesian_model import (\n BaseBayesianModel,\n)\n\n\nclass BayesianWide(BaseBayesianModel):\n r\"\"\"Defines a `Wide` model. This is a linear model where the\n non-linearlities are captured via crossed-columns\n\n Parameters\n ----------\n input_dim: int\n size of the Embedding layer. `input_dim` is the summation of all the\n individual values for all the features that go through the wide\n component. 
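The `has_executed` list in `find_infinite_loop` can equally be a set of visited instruction pointers, which avoids pre-sizing and reads closer to the halting condition; a sketch over the same `(cmd, n)` tuples:

```python
def run_until_repeat(commands):
    # Returns (pc, acc): pc == len(commands) means normal termination,
    # anything smaller is the instruction about to be re-executed.
    seen, acc, pc = set(), 0, 0
    while pc < len(commands) and pc not in seen:
        seen.add(pc)
        cmd, n = commands[pc]
        if cmd == 'acc':
            acc += n
        pc += n if cmd == 'jmp' else 1
    return pc, acc
```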
For example, if the wide component receives 2 features with\n 5 individual values each, `input_dim = 10`\n pred_dim: int\n size of the ouput tensor containing the predictions\n prior_sigma_1: float, default = 1.0\n The prior weight distribution is a scaled mixture of two Gaussian\n densities:\n\n $$\n \\begin{aligned}\n P(\\mathbf{w}) = \\prod_{i=j} \\pi N (\\mathbf{w}_j | 0, \\sigma_{1}^{2}) + (1 - \\pi) N (\\mathbf{w}_j | 0, \\sigma_{2}^{2})\n \\end{aligned}\n $$\n\n `prior_sigma_1` is the prior of the sigma parameter for the first of the two\n Gaussians that will be mixed to produce the prior weight\n distribution.\n prior_sigma_2: float, default = 0.002\n Prior of the sigma parameter for the second of the two Gaussian\n distributions that will be mixed to produce the prior weight\n distribution\n prior_pi: float, default = 0.8\n Scaling factor that will be used to mix the Gaussians to produce the\n prior weight distribution\n posterior_mu_init: float = 0.0\n The posterior sample of the weights is defined as:\n\n $$\n \\begin{aligned}\n \\mathbf{w} &= \\mu + log(1 + exp(\\rho))\n \\end{aligned}\n $$\n\n where:\n\n $$\n \\begin{aligned}\n \\mathcal{N}(x\\vert \\mu, \\sigma) &= \\frac{1}{\\sqrt{2\\pi}\\sigma}e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}\\\\\n \\log{\\mathcal{N}(x\\vert \\mu, \\sigma)} &= -\\log{\\sqrt{2\\pi}} -\\log{\\sigma} -\\frac{(x-\\mu)^2}{2\\sigma^2}\\\\\n \\end{aligned}\n $$\n\n $\\mu$ is initialised using a normal distributtion with mean\n `posterior_mu_init` and std equal to 0.1.\n posterior_rho_init: float = -7.0\n As in the case of $\\mu$, $\\rho$ is initialised using a\n normal distributtion with mean `posterior_rho_init` and std equal to\n 0.1.\n\n Attributes\n -----------\n bayesian_wide_linear: nn.Module\n the linear layer that comprises the wide branch of the model\n\n Examples\n --------\n >>> import torch\n >>> from pytorch_widedeep.bayesian_models import BayesianWide\n >>> X = torch.empty(4, 4).random_(6)\n >>> wide = BayesianWide(input_dim=X.unique().size(0), pred_dim=1)\n >>> out = wide(X)\n \"\"\"\n\n def __init__(\n self,\n input_dim: int,\n pred_dim: int = 1,\n prior_sigma_1: float = 1.0,\n prior_sigma_2: float = 0.002,\n prior_pi: float = 0.8,\n posterior_mu_init: float = 0.0,\n posterior_rho_init: float = -7.0,\n ):\n super(BayesianWide, self).__init__()\n # Embeddings: val + 1 because 0 is reserved for padding/unseen cateogories.\n self.bayesian_wide_linear = bnn.BayesianEmbedding(\n n_embed=input_dim + 1,\n embed_dim=pred_dim,\n padding_idx=0,\n prior_sigma_1=prior_sigma_1,\n prior_sigma_2=prior_sigma_2,\n prior_pi=prior_pi,\n posterior_mu_init=posterior_mu_init,\n posterior_rho_init=posterior_rho_init,\n )\n self.bias = nn.Parameter(torch.zeros(pred_dim))\n\n def forward(self, X: Tensor) -> Tensor:\n out = self.bayesian_wide_linear(X.long()).sum(dim=1) + self.bias\n return out\n","repo_name":"jrzaurin/pytorch-widedeep","sub_path":"pytorch_widedeep/bayesian_models/tabular/bayesian_linear/bayesian_wide.py","file_name":"bayesian_wide.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":1164,"dataset":"github-code","pt":"81"} +{"seq_id":"32308195983","text":"import dataclasses\nimport json\nfrom itertools import chain\n\nimport wx\n\nfrom db import LineDrug, LineProcedure, Patient, Visit\nfrom misc import (\n calc_quantity,\n check_blank_to_none,\n sale_unit_from_db,\n update_druglist_bm,\n weight_bm,\n)\nfrom misc.printer import PrintOut, printdata\nfrom ui import mainview as mv\n\n\nclass 
GetWeightBtn(wx.BitmapButton):\n \"Get latest weight\"\n\n def __init__(self, parent: \"mv.MainView\"):\n super().__init__(parent, bitmap=wx.Bitmap(weight_bm))\n self.mv = parent\n self.SetToolTip(\"Lấy cân nặng mới nhất\")\n self.Bind(wx.EVT_BUTTON, self.onClick)\n self.Disable()\n\n def onClick(self, _):\n visit_count = self.mv.visit_list.GetItemCount()\n if self.mv.state.patient and (visit_count > 0):\n self.mv.weight.SetWeight(\n self.mv.connection.execute(\n f\"\"\"\n SELECT weight\n FROM {Visit.__tablename__}\n WHERE (patient_id) = {self.mv.state.patient.id}\n ORDER BY exam_datetime DESC\n LIMIT 1\n \"\"\"\n ).fetchone()[\"weight\"]\n )\n\n\nclass NoRecheckBtn(wx.Button):\n \"Set RecheckCtrl Value to 0\"\n\n def __init__(self, parent: \"mv.MainView\", **kwargs):\n super().__init__(parent, label=\"Không tái khám\", **kwargs)\n self.mv = parent\n self.Bind(wx.EVT_BUTTON, self.onClick)\n self.Disable()\n\n def onClick(self, _):\n self.mv.recheck.SetValue(0)\n\n\nclass UpdateQuantityBtn(wx.Button):\n \"Update quantity in druglist based on days\"\n\n def __init__(self, parent: \"mv.MainView\"):\n super().__init__(parent)\n self.mv = parent\n self.SetLabelText(\"Cập nhật số lượng thuốc\")\n self.SetBitmap(wx.Bitmap(update_druglist_bm))\n self.Bind(wx.EVT_BUTTON, self.onClick)\n self.Disable()\n\n def onClick(self, _):\n self.update_quantity()\n\n def update_quantity(self):\n \"\"\"Update quantity in DrugList, also update price\"\"\"\n state = self.mv.state\n drug_list = self.mv.order_book.prescriptionpage.drug_list\n for idx, item in enumerate(\n chain(state.old_linedrug_list, state.new_linedrug_list)\n ):\n wh = state.all_warehouse[item.warehouse_id]\n item.quantity = calc_quantity(\n times=item.times,\n dose=item.dose,\n days=self.mv.days.Value,\n sale_unit=wh.sale_unit,\n config=self.mv.config,\n )\n drug_list.SetItem(\n idx,\n 4,\n f\"{item.quantity} {sale_unit_from_db(wh.sale_unit , wh.usage_unit)}\",\n )\n self.mv.price.FetchPrice()\n self.Disable()\n\n\nclass NewVisitBtn(wx.Button):\n def __init__(self, parent: \"mv.MainView\"):\n super().__init__(parent, label=\"Lượt khám mới\")\n self.mv = parent\n self.Bind(wx.EVT_BUTTON, self.onClick)\n self.Disable()\n\n def onClick(self, _):\n if self.mv.patient_book.Selection == 1:\n self.mv.state.visit = None\n else:\n idx: int = self.mv.visit_list.GetFirstSelected()\n self.mv.visit_list.Select(idx, 0)\n\n\nclass SaveBtn(wx.Button):\n \"Insert/update visit\"\n\n def __init__(self, parent: \"mv.MainView\"):\n super().__init__(parent, id=wx.ID_SAVE, label=\"Lưu\")\n self.mv = parent\n self.Disable()\n self.Bind(wx.EVT_BUTTON, self.onClick)\n\n def onClick(self, _):\n if self.mv.state.visit:\n self.update_visit()\n else:\n self.insert_visit()\n\n def insert_visit(self):\n if self.mv.check_diag_wt_filled():\n mv = self.mv\n state = mv.state\n p = state.patient\n assert p is not None\n assert state.old_linedrug_list == []\n assert state.old_lineprocedure_list == []\n past_history = check_blank_to_none(mv.past_history.Value)\n try:\n with mv.connection as con:\n con.execute(\n f\"UPDATE {Patient.__tablename__} SET past_history = ? 
WHERE id = {p.id}\",\n (past_history,),\n )\n vid = con.execute(\n f\"\"\"\n INSERT INTO {Visit.__tablename__} ({Visit.commna_joined_field_names()})\n VALUES ({Visit.named_style_placeholders()})\n \"\"\",\n {\n \"diagnosis\": mv.diagnosis.Value.strip(),\n \"weight\": mv.weight.GetWeight(),\n \"days\": mv.days.Value,\n \"recheck\": mv.recheck.Value,\n \"price\": mv.price.GetPrice(),\n \"patient_id\": p.id,\n \"vnote\": check_blank_to_none(mv.vnote.Value),\n \"follow\": check_blank_to_none(mv.follow.Value),\n },\n ).lastrowid\n assert vid is not None\n con.executemany(\n f\"\"\"\n INSERT INTO {LineDrug.__tablename__}\n ({LineDrug.commna_joined_field_names()})\n VALUES ({LineDrug.named_style_placeholders()})\n \"\"\",\n (\n dataclasses.asdict(item) | {\"visit_id\": vid}\n for item in state.new_linedrug_list\n ),\n )\n con.executemany(\n f\"\"\"\n INSERT INTO {LineProcedure.__tablename__}\n ({LineProcedure.commna_joined_field_names()})\n VALUES ({LineProcedure.named_style_placeholders()})\n \"\"\",\n (\n dataclasses.asdict(item) | {\"visit_id\": vid}\n for item in state.new_lineprocedure_list\n ),\n )\n wx.MessageBox(\n \"Lưu lượt khám mới thành công\",\n \"Lưu lượt khám mới\",\n style=wx.OK_DEFAULT | wx.ICON_NONE,\n )\n if self.mv.config.ask_print:\n if (\n wx.MessageBox(\"In toa về?\", \"In toa\", style=wx.YES | wx.NO)\n == wx.YES\n ):\n printout = PrintOut(self.mv)\n wx.Printer(wx.PrintDialogData(printdata)).Print(\n self, printout, False\n )\n self.mv.state.refresh()\n except Exception as error:\n wx.MessageBox(f\"Lỗi không lưu lượt khám được\\n{error}\", \"Lỗi\")\n\n def update_visit(self):\n mv = self.mv\n state = mv.state\n if self.mv.check_diag_wt_filled():\n p = state.patient\n assert p is not None\n past_history = check_blank_to_none(self.mv.past_history.Value)\n v = state.visit\n assert v is not None\n v.diagnosis = mv.diagnosis.Value.strip()\n v.weight = mv.weight.GetWeight()\n v.days = mv.days.Value\n v.recheck = mv.recheck.Value\n v.price = mv.price.GetPrice()\n v.vnote = check_blank_to_none(self.mv.vnote.Value)\n v.follow = check_blank_to_none(self.mv.follow.Value)\n\n try:\n with self.mv.connection as con:\n con.execute(\n f\"UPDATE {Patient.__tablename__} SET past_history = ? 
WHERE id = ?\",\n (past_history, p.id),\n )\n con.execute(\n f\"\"\"\n UPDATE {Visit.__tablename__} SET ({Visit.commna_joined_field_names()})\n = ({Visit.qmark_style_placeholders()})\n WHERE id = ?\n \"\"\",\n (*v.qmark_style_sql_params(), v.id),\n )\n con.executemany(\n f\"\"\"\n UPDATE {LineDrug.__tablename__}\n SET (dose, times, quantity, usage_note, outclinic) = (?,?,?,?,?)\n WHERE id=?\n \"\"\",\n (\n (\n item.dose,\n item.times,\n item.quantity,\n item.usage_note,\n item.outclinic,\n item.id,\n )\n for item in state.old_linedrug_list\n ),\n )\n con.executemany(\n f\"DELETE FROM {LineDrug.__tablename__} WHERE id = ?\",\n ((item.id,) for item in state.to_delete_old_linedrug_list),\n )\n con.executemany(\n f\"\"\"\n INSERT INTO {LineDrug.__tablename__}\n ({LineDrug.commna_joined_field_names()})\n VALUES ({LineDrug.named_style_placeholders()})\n \"\"\",\n (\n dataclasses.asdict(item) | {\"visit_id\": v.id}\n for item in state.new_linedrug_list\n ),\n )\n con.executemany(\n f\"DELETE FROM {LineProcedure.__tablename__} where id = ?\",\n ((item.id,) for item in state.to_delete_old_lineprocedure_list),\n )\n con.executemany(\n f\"\"\"\n INSERT INTO {LineProcedure.__tablename__} ({LineProcedure.commna_joined_field_names()})\n VALUES ({LineProcedure.qmark_style_placeholders()})\n \"\"\",\n (\n (item.procedure_id, v.id)\n for item in state.new_lineprocedure_list\n ),\n )\n wx.MessageBox(\n \"Cập nhật lượt khám thành công\",\n \"Cập nhật lượt khám\",\n style=wx.OK_DEFAULT | wx.ICON_NONE,\n )\n if self.mv.config.ask_print:\n if (\n wx.MessageBox(\"In toa về?\", \"In toa\", style=wx.YES | wx.NO)\n == wx.YES\n ):\n printout = PrintOut(self.mv)\n wx.Printer(wx.PrintDialogData(printdata)).Print(\n self, printout, False\n )\n self.mv.state.refresh()\n except Exception as error:\n wx.MessageBox(f\"Lỗi không Cập nhật lượt khám được\\n{error}\", \"Lỗi\")\n","repo_name":"vuongkienthanh/SimpleClinic","sub_path":"src/ui/mainview_widgets/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":11310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19005661960","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\nurl(r'^$', views.incidenteListar, name=\"incidente-listar\"),\nurl(r'^Crear/$', views.incidenteCrear, name=\"incidente-crear\"),\nurl(r'^Crear/Tipo/$', views.incidenteTipo, name=\"incidente-tipo\"),\nurl(r'^Detalle/(?P<pk>\d+)$', views.incidenteDetalle, name=\"incidente-detalle\"),\nurl(r'^Cuerpo/(?P<pk>\d+)$', views.cuerpoCrear, name=\"incidente-cuerpo\"),\nurl(r'^Estadistica/$', views.incidenteEstadistica, name=\"incidente-estadistica\"),\n]","repo_name":"GeOnE-357/ProyectoHuellas","sub_path":"incidente/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21598356410","text":"from html.parser import HTMLParser\nfrom urllib.parse import urljoin\n\nimport pandas as pd\nimport pkg_resources\nimport requests\n\n\n# DWD CDC HTTP server.\nbaseurl = \"https://opendata.dwd.de/\"\n\nstation_metadata = {\n    \"Stations_id\": {\"name\": \"station_id\", \"type\": \"str\"},\n    \"von_datum\": {\"name\": \"date_start\", \"type\": \"date\", \"format\": \"%Y%m%d\"},\n    \"bis_datum\": {\"name\": \"date_end\", \"type\": \"date\", \"format\": \"%Y%m%d\"},\n    \"Stationshoehe\": {\"name\": \"height\", \"type\": \"int64\"},\n    \"geoBreite\": {\"name\": \"geo_lat\", \"type\": \"float64\"},\n    \"geoLaenge\": {\"name\": \"geo_lon\", \"type\": \"float64\"},\n    \"Stationsname\": {\"name\": \"name\", \"type\": \"str\"},\n    \"Bundesland\": {\"name\": \"state\", \"type\": \"str\"},\n}\n\nstation_colnames_kv = {k: v[\"name\"] for k, v in station_metadata.items()}\nstation_coltypes_kv = {\n    k: v[\"type\"] for k, v in station_metadata.items() if v[\"type\"] != \"date\"\n}\nstation_datetypes_kv = [k for k, v in station_metadata.items() if v[\"type\"] == \"date\"]\n\nmeasurement_metadata = {\n    \"STATIONS_ID\": {\"name\": \"station_id\", \"type\": \"str\"},\n    \"MESS_DATUM\": {\"name\": \"date_start\", \"type\": \"date\", \"format\": \"%Y%m%d%H%M\"},\n    \"QN\": {\"name\": \"QN\", \"type\": \"int64\"},\n    \"PP_10\": {\"name\": \"PP_10\", \"type\": \"float64\"},\n    \"TT_10\": {\"name\": \"TT_10\", \"type\": \"float64\"},\n    \"TM5_10\": {\"name\": \"TM5_10\", \"type\": \"float64\"},\n    \"RF_10\": {\"name\": \"RF_10\", \"type\": \"float64\"},\n    \"TD_10\": {\"name\": \"TD_10\", \"type\": \"float64\"},\n}\n\nmeasurement_colnames_kv = {k: v[\"name\"] for k, v in measurement_metadata.items()}\nmeasurement_coltypes_kv = {\n    k: v[\"type\"] for k, v in measurement_metadata.items() if v[\"type\"] != \"date\"\n}\nmeasurement_datetypes_kv = [\n    k for k, v in measurement_metadata.items() if v[\"type\"] == \"date\"\n]\n\n\n# Observations in Germany.\ngermany_climate_url = urljoin(\n    baseurl, \"climate_environment/CDC/observations_germany/climate/\"\n)\nmosmix_s_forecast_url = urljoin(\n    baseurl, \"weather/local_forecasts/mos/MOSMIX_S/all_stations/kml/\"\n)\n\n\ndef parse_htmllist(baseurl, content, extension=None, full_url=True):\n    class ListParser(HTMLParser):\n        def __init__(self):\n            HTMLParser.__init__(self)\n            self.data = []\n\n        def handle_starttag(self, tag, attrs):\n            if tag == \"a\":\n                for attr in attrs:\n                    if attr[0] == \"href\" and attr[1] != \"../\":\n                        self.data.append(attr[1])\n\n    parser = ListParser()\n    parser.feed(content)\n    paths = parser.data\n    parser.close()\n\n    if extension:\n        paths = [path for path in paths if extension in path]\n\n    if full_url:\n        return [urljoin(baseurl + \"/\", path) for path in paths]\n    else:\n        return [path.rstrip(\"/\") for path in paths]\n\n\ndef 
get_resource_index(url, extension=\"\", full_url=True):\n \"\"\"\n Extract link list from HTML, given a url\n\n :params str url: url of a webpage with simple HTML link list\n :params str extension: String that should be matched in the link list; if \"\", all are returned\n \"\"\"\n\n response = requests.get(url)\n if response.status_code != 200:\n raise ValueError(f\"Fetching resource {url} failed\")\n resource_list = parse_htmllist(url, response.text, extension, full_url)\n return resource_list\n\n\ndef get_stations_lookup():\n \"\"\"Return station lookup.\"\"\"\n csv_file = pkg_resources.resource_filename(\"dwdbulk\", \"station_lookup.csv\")\n return pd.read_csv(csv_file, dtype=str)\n\n\ndef y2k_date_parser(col, date_format=\"%Y%m%d%H%M\"):\n \"\"\"Parse dates according to typical DWD spec: CET up to 2000, UTC thereafter.\"\"\"\n\n y2k = pd.Timestamp(\"2000-01-01\")\n\n col_raw = pd.to_datetime(col, format=date_format)\n col_raw = col_raw.to_series(keep_tz=True).reset_index(drop=True)\n\n pre_y2k_bool = col_raw < y2k\n\n col_raw[pre_y2k_bool] = (\n col_raw[pre_y2k_bool]\n .dt.tz_localize(tz=\"CET\", ambiguous=\"NaT\", nonexistent=\"NaT\")\n .dt.tz_convert(\"UTC\")\n )\n col_raw[~pre_y2k_bool] = col_raw[~pre_y2k_bool].dt.tz_localize(tz=\"UTC\")\n\n return pd.to_datetime(col_raw, utc=True)\n","repo_name":"jeremiahpslewis/dwdbulk","sub_path":"dwdbulk/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"1864839296","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\"\"\"\r\nCopyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.\r\n\r\nThis program is free software; you can redistribute it and/or modify\r\nit under the terms of the Apache License Version 2.0.You may not use this file\r\nexcept in compliance with the License.\r\n\r\nThis program is distributed in the hope that it will be useful,\r\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\r\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
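A quick usage sketch for `get_resource_index` against the CDC index defined above; the subdirectory and the `.zip` filter are illustrative, and the call performs a live HTTP request:

```python
# List archive links under one observation subtree (path is an example).
url = germany_climate_url + "10_minutes/air_temperature/historical/"
archives = get_resource_index(url, extension=".zip")
print(f"{len(archives)} archive links found")
```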
See the\r\nApache License for more details at\r\nhttp://www.apache.org/licenses/LICENSE-2.0\r\n\r\nadd cheque to specify bank\r\n\"\"\"\r\nimport os\r\nimport pickle\r\nimport traceback\r\nfrom te import platform as cceconf\r\nfrom te.lang.cce.rl_bank.rl_bank import add_case\r\nfrom te.lang.cce.rl_bank.cheque import gen_cheque\r\nfrom te.lang.cce.rl_bank.rl_bank import get_bank_path\r\nfrom te.lang.cce.rl_bank.rl_bank import get_bank_name\r\n\r\n\r\ndef get_output_tensors(output_tensors, output_names, load_obj):\r\n \"\"\"\r\n get real output tensors\r\n :param output_tensors:\r\n :param output_names:\r\n :param load_obj:\r\n :return:\r\n \"\"\"\r\n for output_name in output_names:\r\n for i in range(len(load_obj.stages)):\r\n stage = load_obj.stages[i]\r\n # support for tuple_reduce_sum\r\n if output_name.startswith(stage.op.name + '_v'):\r\n out_idx = int(output_name.split('_v')[-1])\r\n output_tensors.append(stage.op.output(out_idx))\r\n elif output_name == stage.op.name:\r\n out_idx = 0\r\n output_tensors.append(stage.op.output(out_idx))\r\n\r\n\r\ndef add_cheque_to_bank(sch_py_path, bank_type, kernel_name=\"\"):\r\n \"\"\"\r\n add_cheque_to_bank\r\n :param sch_py_path:\r\n :param bank_type:\r\n :param kernel_name:\r\n :return:\r\n \"\"\"\r\n\r\n if not os.path.exists(sch_py_path):\r\n raise RuntimeError(\"%s not exists\" % sch_py_path)\r\n\r\n if bank_type not in [\"custom\", \"built-in\"]:\r\n raise RuntimeError(\"bank_type must be custom or built-in,while is %s\" % bank_type)\r\n\r\n with open(sch_py_path, 'r') as file_handler:\r\n shcedule_code_str = file_handler.read()\r\n\r\n tick = int(os.path.basename(sch_py_path).split('_')[0])\r\n\r\n output_tensors = []\r\n output_names = []\r\n code_line_list = shcedule_code_str.split(\"\\n\")\r\n for code_line in code_line_list:\r\n if \"#op_outputs:\" in code_line:\r\n output_names = [\r\n output.strip() for output in code_line.split(\"#op_outputs:\")[1].split(\",\")\r\n ]\r\n elif \"pickle.loads(\" in code_line:\r\n tensor_pickle_byte = code_line.split(\"pickle.loads(b'\")[-1][:-2].encode(\r\n 'ISO-8859-1').decode('unicode-escape').encode('ISO-8859-1')\r\n load_obj = pickle.loads(tensor_pickle_byte)\r\n get_output_tensors(output_tensors, output_names, load_obj)\r\n\r\n if not output_tensors:\r\n raise RuntimeError(\"get output_tensors from schedule py file fail!!!\")\r\n\r\n cheque_list = gen_cheque(sch_py_path, kernel_name=kernel_name)\r\n\r\n bank_dir = get_bank_path()\r\n soc_version = cceconf.get_soc_spec(\"SOC_VERSION\")\r\n bank_name = get_bank_name(soc_version)\r\n bank_json_path = os.path.join(bank_dir, soc_version, bank_type, \"%s.json\" % bank_name)\r\n ret = add_case(output_tensors, cheque_list, tick, bank_json_path)\r\n if ret:\r\n return True\r\n return False\r\n\r\n\r\ndef try_add_cheque(sch_py_path, bank_type, kernel_name=\"\"):\r\n \"\"\"\r\n try_add_cheque\r\n :param sch_py_path:\r\n :param bank_type:\r\n :param kernel_name:\r\n :return:\r\n \"\"\"\r\n try:\r\n ret = add_cheque_to_bank(sch_py_path, bank_type, kernel_name=kernel_name)\r\n return ret, \"\"\r\n except Exception: # pylint: disable=broad-except\r\n return False, \"sch_py_path:%s add cheque to %s bank fail:%s\" % (sch_py_path, bank_type,\r\n traceback.format_exc())\r\n","repo_name":"jizhuoran/caffe-huawei-atlas-convertor","sub_path":"convertor/huawei/te/lang/cce/rl_bank/add_cheque.py","file_name":"add_cheque.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} 
+{"seq_id":"43159804105","text":"\"\"\" Test the data transformer \"\"\"\n\nimport shutil\nimport os\nimport pandas as pd\n\nfrom src.data.dataset_generator import DatasetGenerator\nfrom src.data.data_transformer import DataTransformer\n\n\ndef test_transforming():\n dataset = DatasetGenerator().get_dataset('2018-08-01', '2019-01-01', 'T16')\n transformer = DataTransformer()\n transformed_data = transformer.fit_transform(dataset)\n assert transformed_data.equals(transformer.transform_data(dataset))\n reverse_data = transformer.reverse_transform(transformed_data)\n\n assert dataset.round(4).equals(reverse_data.round(4))\n\n\ndef test_reverse_spot():\n dataset = DatasetGenerator().get_dataset('2018-08-01', '2019-01-01', 'T16')\n transformer = DataTransformer()\n transformed_data = transformer.fit_transform(dataset)\n transformed_data2 = transformer.transform_data(dataset)\n pd.testing.assert_frame_equal(transformed_data, transformed_data2)\n reverse_spot_data = transformer.reverse_transform(\n transformed_data).SPOTPrice.values\n spot_data = transformer.reverse_transform_spot(transformed_data.SPOTPrice.values)\n for i in range(spot_data.size):\n assert spot_data[i] == reverse_spot_data[i]\n assert spot_data[i].round(4) == dataset.SPOTPrice.values[i].round(4)\n\n\ndef test_save_load():\n dataset = DatasetGenerator().get_dataset('2018-08-01', '2019-01-01', 'T16')\n transformer = DataTransformer()\n transformed_data = transformer.fit_transform(dataset)\n shutil.rmtree(f'data/test_models/transformer/', ignore_errors=True)\n os.makedirs(f'data/test_models/transformer/')\n transformer.save('data/test_models/transformer/')\n\n new_transformer = DataTransformer.load('data/test_models/transformer/')\n transformed_data2 = new_transformer.transform_data(dataset)\n pd.testing.assert_frame_equal(transformed_data, transformed_data2)\n shutil.rmtree(f'data/test_models/transformer/', ignore_errors=True)\n","repo_name":"kerstinforster/electricity-price-forecasting","sub_path":"test/test_data/test_data_transformer.py","file_name":"test_data_transformer.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27748328560","text":"from nltk import word_tokenize\nfrom nltk.stem import PorterStemmer\nimport pickle\nimport re\nimport langid\nfrom sinling import SinhalaTokenizer, POSTagger\nimport pandas as pd\nimport csv\nimport string\n\n# Model Code\ndata = pd.read_csv(\"Dataset/DataSetShuffled.csv\")\ntotal_questions = len(data)\nprint(\"Number of rows : \", total_questions)\n\n\n# # Once the shuffling of the data set is completed this set of code should be commented out\n# # Shuffling the dataset\n# questiondata1 = data.sample(frac=1).reset_index(drop=True)\n#\n# # Saving the shuffled dataset to a nes CSV file\n# questiondata1.to_csv(\"Dataset/DataSetShuffled.csv\", index=False)\n\n\n# The preprocessing method of the model\ndef preprocess_and_tokenize(data):\n # remove html markup\n data = re.sub(\"(<.*?>)\", \"\", data)\n\n # remove urls\n data = re.sub(r'http\\S+', '', data)\n\n # remove hashtags and @names\n data = re.sub(r\"(#[\\d\\w\\.]+)\", '', data)\n data = re.sub(r\"(@[\\d\\w\\.]+)\", '', data)\n\n # remove punctuation and non-ascii digits\n data = re.sub(\"(\\\\W|\\\\d)\", \" \", data)\n\n # remove whitespace\n data = data.strip()\n\n # tokenization with nltk\n data = word_tokenize(data)\n\n # stemming with nltk\n porter = PorterStemmer()\n stem_data = [porter.stem(word) for word in data]\n\n return 
stem_data\n\n\nnum_of_correct_answers = 0\nwith open('Dataset/DataSetShuffled.csv', encoding=\"utf8\") as file_obj:\n heading = next(file_obj)\n\n reader_obj = csv.reader(file_obj)\n\n for row in reader_obj:\n question = row[0]\n\n filename = 'Model2.sav' # Model file\n\n model = pickle.load(open(filename, 'rb')) # loading the model\n category = model.predict([question])[0] # Predicting the category of the question\n # print(\"Question Category : \", category)\n\n # question = re.sub(\"(<.>)\", \"\", question)\n\n # Tokenization with sinling SinhalaTokenizer (''https://github.com/ysenarath/sinling)\n tokenizer = SinhalaTokenizer()\n tokenized_sentences = [tokenizer.tokenize(f'{ss}.') for ss in tokenizer.split_sentences(question)]\n\n tagger = POSTagger()\n pos_tags = tagger.predict(tokenized_sentences)\n\n # Identifying the numbers in the question and assigning them into the variable\n keyword_numbers = [(word, tag) for word, tag in pos_tags[0] if (tag == 'NUM')]\n\n # print('Keyword Numbers : ', keyword_numbers)\n\n num_1 = keyword_numbers[0][0]\n num_2 = keyword_numbers[1][0]\n\n number_1 = int(num_1)\n number_2 = int(num_2)\n\n # print(\"num 1 : \", num_1, \" num 2 : \", num_2)\n\n # Calulation for Multiplication\n if category == 'Multiplication':\n answer = number_1 * number_2 # calculating the answer\n\n elif category == 'Division':\n if number_1 > number_2:\n answer = number_1 / number_2 # calculating the answer\n\n elif number_2 > number_1:\n answer = number_2 / number_1 # calculating the answer\n\n elif category == 'Addition':\n answer = number_1 + number_2 # calculating the answer\n\n elif category == 'Subtraction':\n if number_1 > number_2:\n answer = number_1 - number_2 # calculating the answer\n\n elif number_2 > number_1:\n answer = number_2 - number_1 # calculating the answer\n\n actual_answer = int(row[1])\n generated_answer = int(answer)\n\n print('Preticted Answer : ', generated_answer)\n print('Actual Answer : ', actual_answer)\n\n if generated_answer == actual_answer:\n print(\"hello\")\n num_of_correct_answers = num_of_correct_answers + 1\n\nprint(\"Number of Correct Answers : \", num_of_correct_answers)\nprint(\"Total Number of Questions : \", total_questions)\n\nsystem_accuracy = (num_of_correct_answers / total_questions) * 100\n\nprint(\"System Accuracy : \", system_accuracy)\n","repo_name":"SHENAL1/Sinhala-Mathematical-Question-Answering-System---FYP","sub_path":"Frontend/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"26360304784","text":"import webbrowser\nfrom utils.retry_decorator import retry_input\nfrom game_engine.scene.scene import Scene\nfrom game_engine.scene.dice import dice_scene\nfrom game_engine.scene.too_sober import too_sober\nfrom game_engine.scene.fight import fight_scene\nfrom game_engine.scene.earthquake import earthquake_scene\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from game_engine.game.game import Game\n\n\nclass SceneManager:\n @retry_input\n def get_choice(self, scene: Scene) -> int:\n print(scene.render_choices())\n return int(input('Pick: '))\n\n def handle_choice(self, scene: Scene) -> str:\n user_choice = self.get_choice(scene)\n if user_choice == 0:\n exit()\n\n print(scene.render_result(user_choice))\n return str(user_choice)\n\n def handle_next_scene(self, next_scene: str, game: 'Game') -> str:\n if next_scene == 'dice':\n return dice_scene(game)\n\n if next_scene == 'fightTime':\n 
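The arithmetic block above leaves `answer` unbound when a subtraction or division question carries two equal numbers, since both `>` branches are skipped. A sketch that orders the operands once and always returns a value (`solve` is a hypothetical helper; division assumes the smaller operand is nonzero):

```python
def solve(category: str, a: int, b: int) -> float:
    # Larger operand first, mirroring the original's number_1/number_2
    # checks, but the a == b case now falls through naturally.
    hi, lo = max(a, b), min(a, b)
    if category == 'Addition':
        return hi + lo
    if category == 'Subtraction':
        return hi - lo
    if category == 'Multiplication':
        return hi * lo
    if category == 'Division':
        return hi / lo
    raise ValueError(f"unknown category: {category}")
```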
return fight_scene(game)\n\n if next_scene == 'earthquake':\n return earthquake_scene(game)\n\n if next_scene == 'goHome':\n game.pint_counter.update_pints(-3)\n\n return next_scene\n\n def manage(self, scene: Scene, game: 'Game') -> str:\n if scene.return_name() == 'win':\n game.victory()\n\n if game.pint_counter.get_pints() <= 0:\n return too_sober()\n\n print(scene.render_intro({\n \"main_character\": game.main_character.getName(),\n \"superhero\": game.superhero.getName(),\n \"pints\": game.pint_counter.get_pints(),\n \"prize\": game.prize,\n \"insult\": game.insult\n }))\n\n if not scene.return_options():\n print('THE END')\n return 'end_scene'\n\n scene_choice = scene.options[self.handle_choice(scene)]\n if scene_choice['choice'] == 'Ask Rick':\n webbrowser.open('https://www.youtube.com/watch?v=dQw4w9WgXcQ')\n return 'lose'\n\n return self.handle_next_scene(scene_choice['nextScene'], game)\n","repo_name":"louannl/bar-sim","sub_path":"game_engine/scene/scene_manager.py","file_name":"scene_manager.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73346394506","text":"import numpy as np\nimport random\nimport math \nimport matplotlib.pyplot as pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\nimport os, sys\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\nfrom keras.datasets import mnist\n\n(train_X, train_y), (test_X, test_y) = mnist.load_data()\n\nitem = int(random.random() * len(train_X))\n\nif len(sys.argv) > 1:\n item = int(sys.argv[1])\n\nlabel = train_y[item]\nsource_points = train_X[item]\n\nprint(\"item \" + str(item) + \" - label is \" + str(label))\n\nzero_min = np.zeros((28, 28))\none_max = np.ones((28, 28))\n\ndef gaussian_noise(X, sigma):\n ''' adds a gaussian noise limited to 0 and 1 inclusive'''\n X_nonzero_indexes = np.nonzero(X)\n noise = np.random.normal(0, sigma, X.shape)\n copy = X.copy()\n copy[X_nonzero_indexes] = np.minimum(np.maximum(X[X_nonzero_indexes] + \n noise[X_nonzero_indexes], zero_min[X_nonzero_indexes]), one_max[X_nonzero_indexes])\n return copy\n\ndef convert_to_rgb(gray_image, color_map):\n '''Convert gray image to RGB using the given color map'''\n s_m = pyplot.cm.ScalarMappable(cmap = color_map)\n img_shape = gray_image.shape\n flattened = gray_image.flatten()\n colors = s_m.to_rgba(flattened)\n result = np.zeros(flattened.shape + (3,))\n\n for i in range(len(flattened)):\n if flattened[i] > 0:\n result[i] = colors[i][:-1]\n return result.reshape(img_shape + (3,))\n\ndef create_colored_grid(image, depth, color_map):\n grid = np.zeros(image.shape + (depth, 3,))\n for z in range(depth):\n instance_points = gaussian_noise(image, 0.2)\n rgb_points = convert_to_rgb(instance_points, color_map)\n grid[:, :, z] = rgb_points\n return grid\n\ncolor_maps = ['viridis', 'plasma', 'inferno', 'magma', 'cividis', \n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu']\n\ndef transform_instance(instance, color_map, x_rot, y_rot, z_rot):\n\n print(\"Using color_map: \" + color_map)\n\n grid = create_colored_grid(instance / 255.0, 28, color_map)\n\n result = rotate(grid, z_rot, y_rot, x_rot)\n\n return result\n\ndef print_grid(grid):\n\n grid_shape = grid.shape\n\n flattened = grid.reshape(((grid_shape[0] * grid_shape[1] * grid_shape[2]), 3))\n voxel_grid_array = np.zeros(len(flattened))\n\n for i in range(len(flattened)):\n temp = flattened[i]\n if temp[0] > 0 or temp[1] > 0 or temp[2] > 0:\n 
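`handle_next_scene` maps scene names onto handler functions, so it could be table-driven, reducing a new scene to a one-line registration; a sketch using the same imported scene functions:

```python
SCENE_HANDLERS = {
    'dice': dice_scene,
    'fightTime': fight_scene,
    'earthquake': earthquake_scene,
}

def handle_next_scene(self, next_scene: str, game: 'Game') -> str:
    handler = SCENE_HANDLERS.get(next_scene)
    if handler is not None:
        return handler(game)
    if next_scene == 'goHome':
        game.pint_counter.update_pints(-3)
    return next_scene
```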
voxel_grid_array[i] = 1\n\n voxel_grid = voxel_grid_array.reshape((grid_shape[0], grid_shape[1], grid_shape[2]))\n\n fig = pyplot.figure()\n ax = fig.add_subplot(projection='3d')\n ax.azim = 15\n ax.dist = 8\n ax.elev = 75\n ax.voxels(voxel_grid, facecolors=grid)\n\n pyplot.show(block=False)\n\n return fig\n\ndef rotate(grid, z_ang, y_ang, x_ang):\n\n grid_shape = grid.shape\n\n result = np.zeros(grid_shape)\n\n x_lim = grid_shape[0]\n y_lim = grid_shape[1]\n z_lim = grid_shape[2]\n\n for i in range(x_lim):\n for j in range(y_lim):\n for k in range(z_lim):\n\n X = i\n Y = j\n Z = k\n\n if z_ang != 0:\n x = i - x_lim / 2\n y = j - y_lim / 2\n X = int(math.floor(x*math.cos(z_ang) - y*math.sin(z_ang) + x_lim / 2))\n Y = int(math.floor(x*math.sin(z_ang) + y*math.cos(z_ang) + y_lim / 2)) \n\n if y_ang != 0:\n x = X - x_lim / 2\n z = Z - z_lim / 2\n X = int(math.floor(x*math.cos(y_ang) - z*math.sin(y_ang) + x_lim / 2))\n Z = int(math.floor(x*math.sin(y_ang) + z*math.cos(y_ang) + z_lim / 2)) \n\n if x_ang != 0:\n y = Y - y_lim / 2\n z = Z - z_lim / 2\n Y = int(math.floor(y*math.cos(x_ang) - z*math.sin(x_ang) + y_lim / 2))\n Z = int(math.floor(y*math.sin(x_ang) + z*math.cos(x_ang) + z_lim / 2)) \n\n if X >= 0 and Y >= 0 and Z >= 0 and X < x_lim and Y < y_lim and Z < z_lim:\n result[X, Y, Z] = grid[i, j, k]\n\n return result\n\ncolor_map = None\nif len(sys.argv) > 2:\n test = sys.argv[2]\n if test in color_maps:\n color_map = test\n else:\n print(test + \" was not recognized as a valid color scheme\")\n\nif not color_map:\n color_map = random.choice(color_maps)\n\nx_rot = 0 * math.pi / 180.0\ny_rot = 0 * math.pi / 180.0\nz_rot = 0 * math.pi / 180.0\n\nmy_cube = transform_instance(source_points, color_map, x_rot, y_rot, z_rot)\n\nfig = print_grid(my_cube)\n\nname = input(\"Enter the name of image (empty to no save) : \").strip()\n\nif name:\n file_path = \"/tmp/\" + name + '.png'\n fig.savefig(file_path, dpi=fig.dpi)\n print(\"Saved image at \" + file_path)\n","repo_name":"doleron/augmented_3d_mnist","sub_path":"voxel_grid_example.py","file_name":"voxel_grid_example.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11628659668","text":"import numpy as np\nimport librosa as lb\n\nfrom app.Controllers import Utilities,Preprocessor,FeatureExtractor\nfrom app.Classifiers import AbnormalityClassifier, DisorderClassifier\n\nutils=Utilities.Utilities()\npreprocessor = Preprocessor.Preprocessor()\nfeatureExtractor = FeatureExtractor.FeatureExtractor()\nabmormalityClassifier = AbnormalityClassifier.AbnormalityClassifier()\ndisorderClassifier = DisorderClassifier.DisorderClassifier()\n\nclass Analyser:\n def __init__(self):\n pass\n\n def analyse(self,signaldata,samplingrate):\n \n print(len(signaldata),samplingrate)\n\n #step1: preprocessing\n #step2: feature extraction\n #step3: abnormality analysis\n #step4: disorder analysis\n #step5: severity analysis\n\n #step1: preprocessing\n #step1 a: resampling\n signaldata = lb.resample(y=signaldata, orig_sr=samplingrate, target_sr=22050)\n #step1 b: padding\n padded_segment=[]\n if len(signaldata)<(6*22050):\n padded_segment = lb.util.pad_center(signaldata, 6*22050)\n else:\n padded_segment = padded_segment[0:6*22050]\n #step1 c: filtering\n filtered_segment = np.array(preprocessor.get_filtered_segment(padded_segment,samplingrate=22050))\n \n #step2: feature extraction\n mfcc = featureExtractor.get_mfcc(filtered_segment,22050)\n spec = 
featureExtractor.get_mel_spectrogram(filtered_segment,22050)\n chroma_stft = featureExtractor.get_chroma_stft(filtered_segment,22050)\n \n mfcc_arr = np.array([mfcc])\n spec_arr = np.array([spec])\n chroma_stft_arr = np.array([chroma_stft])\n\n #step3: abnormality analysis\n abnormality_classes,abmormality_probabilities = abmormalityClassifier.predict(mfcc_arr,chroma_stft_arr,spec_arr) \n #step4: disorder analysis\n disorder_classes,disorder_probabilities = disorderClassifier.predict(mfcc_arr,chroma_stft_arr,spec_arr)\n #step5: severity analysis\n\n abnormality_object = {}\n for i,c in enumerate(abnormality_classes):\n abnormality_object[c] = abmormality_probabilities[i]\n\n disorder_object = {}\n for i,c in enumerate(disorder_classes):\n disorder_object[c] = disorder_probabilities[i]\n\n result={\n \"abnormality\":abnormality_object,\n \"disorder\":disorder_object,\n \"severity\": 1\n }\n return result\n","repo_name":"Aditya-Dawadikar/Pulmonary-Disease-Prediction-Flask-Server","sub_path":"app/Controllers/Analyser.py","file_name":"Analyser.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33304730776","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Item\nfrom .forms import ItemForm\nfrom django.core.mail import send_mail\nfrom .token import generated_access_code\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.conf import settings\n\ndef welcome(request):\n # make the user code global\n return render(request, 'welcome.html')\n\n@login_required\n# @login_required\ndef index(request):\n return render(request, 'dashboard/index.html')\n\n\n@login_required\ndef staff(request):\n workers = User.objects.all()\n \n context = {\n 'workers': workers,\n }\n return render(request, 'dashboard/staff.html', context)\n\n@login_required\ndef staff_details(request, pk):\n workers = User.objects.get(id=pk)\n context = {\n 'workers': workers,\n }\n return render(request, 'dashboard/staff_details.html', context)\n\n@login_required\ndef item(request):\n items = Item.objects.all()\n context = {\n 'items': items,\n }\n return render(request, 'dashboard/item.html', context)\n\n\n@login_required\ndef item_delete(request, pk):\n item = Item.objects.get(id=pk)\n if request.method == 'POST':\n item.delete()\n return redirect('dashboard-item')\n return render(request, 'dashboard/item_delete.html')\n\n@login_required\ndef item_update(request, pk):\n item = Item.objects.get(id=pk)\n if request.method == 'POST':\n form = ItemForm(request.POST, instance=item)\n if form.is_valid():\n form.save()\n return redirect('dashboard-item')\n else:\n form = ItemForm(instance=item)\n \n context = {\n 'form': form,\n }\n return render(request, 'dashboard/item_update.html', context)\n\n@login_required\ndef update(request):\n return render(request, 'update.html')\n\n\n@login_required\ndef add(request):\n if request.method == 'POST':\n form = ItemForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('dashboard-item')\n else:\n form = ItemForm()\n context = {\n 'form': form,\n }\n return render(request, 'dashboard/add.html', context)\n\n# function to send an email to a user:: call the func and pass the email_to and token/code\ndef sendEmail(secret_code):\n subject = 'Add Item: Verification Code'\n message = f'Hi, We have noticed urge to update items. 
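One apparent slip in the padding logic above: the `else` branch slices the freshly created empty list (`padded_segment[0:6*22050]`) instead of the signal, so recordings longer than six seconds come out empty. A corrected pad-or-truncate sketch:

```python
import librosa as lb

def fix_length(signal, target_len=6 * 22050):
    # Pad short clips to target_len; truncate long ones from the signal
    # itself rather than from the empty placeholder list.
    if len(signal) < target_len:
        return lb.util.pad_center(signal, size=target_len)
    return signal[:target_len]
```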
Your access code is {secret_code}.'\n    email_from = settings.EMAIL_HOST_USER\n    email_to = 'mugizi@duck.com'\n    recipient_list = [email_to, ]\n\n    send_mail(subject, message, email_from, recipient_list)\n    # print('Email has been sent successfully to mugizi@duck.com')\n\n@login_required\ndef validate_code(request):\n    if request.method == 'POST':\n        entered_code = request.POST.get('securitycode')\n        user_code = generated_access_code()\n        sendEmail(user_code)\n        # Perform the validation logic\n        if entered_code == user_code: \n            messages.success(request, 'Code verified successfully!')\n            return redirect('add')\n        else:\n            # if the code is invalid, display an error message\n            error_message = 'Invalid code. Please try again.'\n            messages.error(request, error_message)\n            return redirect('dashboard-index')\n    return redirect('dashboard-index') # Redirect to the home page if the form is not submitted\n\n\n@login_required\n# function to generate a random code\ndef generateCode(request):\n    # Generate a random code; random and string are not imported at module\n    # level, so import them here before use.\n    import random\n    import string\n    code = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))\n    print(\"The generated code to be sent is \", code)\n    return HttpResponse(status=200)","repo_name":"NakacwaOlivia/Expenditure_System","sub_path":"expenditure_system/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27432471452","text":"import cv2\n\ncap = cv2.VideoCapture(0) # open the default camera\ncount = 0 # initialize counter to zero\n\nwhile True:\n    ret, frame = cap.read() # read a frame from the camera\n    \n    # Display the count on the frame\n    text = 'Samples taken: ' + str(count)\n    cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n    \n    cv2.imshow('frame', frame) # show the frame\n    \n    key = cv2.waitKey(1) & 0xFF # poll the keyboard once per frame\n    if key == ord('q'): # quit on 'q'\n        break\n    elif key == ord('s'): # take a sample image on 's' key press\n        ret, sample_frame = cap.read() # read a sample frame from the camera\n        cv2.imwrite('sample_image_' + str(count) + '.jpg', sample_frame) # save the sample image\n        count += 1 # increment the counter\n\ncap.release() # release the camera\ncv2.destroyAllWindows() # close all windows\n","repo_name":"SivaRamana-H-V/FaceReg","sub_path":"cv.py","file_name":"cv.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28525555817","text":"'''\nPutthipong Phukhansung\n633040224-3\nProblem 5\n'''\n\ndef add_emails():\n\n    global email_info\n    course_list = []\n    email_info = {}\n\n\n    while True:\n        email = input(\"Enter an email address: \")\n        if email == 'q' or email == 'Q':\n            break\n\n        course = input(\"Enter a course: \")\n        if course == 'q' or course == 'Q':\n            break\n        course_list.append(course)\n\n        email_info[email] = course\n    return email_info\n\n\ndef print_email_info(arg):\n    print(arg)\n\n\ndef print_course_info(arg):\n    print(\"=== Writing emails and courses to file student.json ===\")\n    print(\"=== Reading emails and courses from file student.json ===\")\n    print(end=\" \")\n    print(arg)\n\n#\nimport json\ndef read_file(): \n    with open('student.json') as file_json:\n        data_read = json.load(file_json)\n        for i in data_read[\"info\"]:\n            print(i[\"email\"], end=\" registered for course \")\n            print(i[\"course\"])\n#\n\n\nif __name__ == \"__main__\":\n    email_course = add_emails()\n    print_email_info(email_course)\n    
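In `validate_code` a fresh code is generated after the user has already typed theirs in, so the comparison can only succeed by coincidence. The usual shape is to generate and email the code in one request, stash it server-side, and compare on the next; a sketch using Django's session and the helpers above (view names hypothetical):

```python
def send_code(request):
    # Generate once, remember it, then email that same code.
    code = generated_access_code()
    request.session['access_code'] = code
    sendEmail(code)
    return redirect('dashboard-index')

def check_code(request):
    entered = request.POST.get('securitycode')
    if entered and entered == request.session.pop('access_code', None):
        messages.success(request, 'Code verified successfully!')
        return redirect('add')
    messages.error(request, 'Invalid code. Please try again.')
    return redirect('dashboard-index')
```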
print_course_info(email_course)","repo_name":"cyascammer/Putthipong-2243-oop-labs","sub_path":"Putthipong-2243-lab6/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38617661097","text":"from datetime import datetime\nimport re\n\n\nclass Field:\n\n def __init__(self, value):\n self._value = None\n self.value = value\n\n def type(self):\n return type(self).__name__\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = value\n\n\nclass Address(Field):\n pass\n\n\nclass Birthday(Field):\n\n @Field.value.setter\n def value(self, data: str):\n\n birthday = re.search(r\"\\d{2}\\.\\d{2}\", data)\n\n if not birthday:\n raise ValueError(\"Birthday not valid.\\n\"\n \"The Birthday should look like '01.01'\")\n\n self._value = datetime.strptime(birthday.group(), \"%d.%m\")\n\n\nclass Email(Field):\n\n @Field.value.setter\n def value(self, email: str):\n new_email = re.search(r\".+@.+\", email)\n\n if not new_email:\n raise ValueError(\"Email not valid.\")\n\n self._value = new_email.group()\n\n\nclass Name(Field):\n pass\n\n\nclass Phone(Field):\n\n @Field.value.setter\n def value(self, phone: str):\n new_phone = re.search(r\"\\+380\\d{9}\", phone)\n\n if not new_phone:\n raise ValueError(\"Phone number not valid.\\n\"\\\n \"The phone number should look like +380123456789\")\n \n self._value = new_phone.group()\n\n\nclass Record:\n\n def __init__(self, name: Name):\n\n self.birthday = None\n self.name = name\n self.fields = {}\n\n def add_field(self, field: Field):\n\n if self.fields.get(field.type()):\n self.fields[field.type()].append(field)\n \n else:\n self.fields.update({field.type(): [field]})\n \n\n def change_field(self, type_field: str, number_in_list: int, new_field: str):\n self.fields[type_field][number_in_list].value = new_field\n\n def days_to_birthday(self):\n\n if not self.birthday:\n raise ValueError(\"Birthday not specified\")\n\n now_date = datetime.now()\n birthday = self.birthday.value.replace(year=now_date.year)\n\n return (birthday - now_date).days + 1 \n\n def remove_field(self, number_in_list: int, type_field: str):\n return self.fields[type_field].pop(number_in_list)\n","repo_name":"alex-kondr/Python","sub_path":"Python core/Project/console_bot/console_bot/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12006569700","text":"\"\"\"\nadmin.py\n\"\"\"\nfrom django.contrib import admin\nfrom django.contrib import messages\n\nfrom .models import Element\n\n\nclass ComposerElementAdmin(admin.ModelAdmin):\n \"\"\"\n Admin for the composer elements.\n \"\"\"\n fields = ('content', 'context_example')\n readonly_fields = (\n 'template_name', 'name', 'is_dynamic', 'has_changed', 'last_changed', 'changed_by')\n list_display = ('template_name', 'name', 'has_changed', 'last_changed', 'changed_by')\n list_filter = ('is_dynamic', 'has_changed')\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n Attempt to save the model.\n \"\"\"\n try:\n obj.attempt_update(form.cleaned_data['content'], request.user)\n except Exception as e:\n messages.error(request, 'Could not update element, %s' % str(e))\n\n\nadmin.site.register(Element, 
ComposerElementAdmin)\n","repo_name":"arora879/mechanical-django","sub_path":"learn-env/lib/python3.8/site-packages/composer/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29473283077","text":"from djitellopy import Tello\nimport cv2\nimport time\nfrom threading import Thread\n\n\nclass VideoStreamTello(object):\n def __init__(self, unit_dp=30, window_name=\"Drone Camera\"):\n # Establish Tello() object\n self.tello = Tello()\n\n # Connect to the tello\n self.tello.connect()\n\n # Query and print out the battery percentage\n self.query_battery()\n\n # Turn on the video stream from the tello\n self.tello.streamon()\n\n # Get the current video feed frame and convert into an image (for\n # display purposes)\n self.camera_frame = self.tello.get_frame_read()\n self.img = self.camera_frame.frame\n\n # Establish object attributes\n self.unit_dp = unit_dp # Length of spatial displacement\n self.window_name = window_name # Name of the video stream popup window\n self.landed = (\n True # Boolean flag to determine whether the tello is on the ground\n )\n self.stream = True # Boolean flag to determine whether the tello should be streaming or not\n self.popup = True\n self.main_loop = True\n\n ##########################\n # Velocity attributes\n ##########################\n\n # Velocity constants\n self.velocity_val = 20\n self.no_velocity = 0\n\n # Velocity params - TODO: Check that these values align with expected\n self.left_velocity = -self.velocity_val\n self.right_velocity = self.velocity_val\n self.forward_velocity = self.velocity_val\n self.backward_velocity = -self.velocity_val\n self.up_velocity = -self.velocity_val\n self.down_velocity = self.velocity_val\n self.left_turn_velocity = -self.velocity_val\n self.right_turn_velocity = self.velocity_val\n\n # Threading is necessary to concurrently display the live video feed\n # and get keystrokes from user\n self.video_stream_t = Thread(target=self.update_frame, args=())\n self.video_stream_t.start()\n\n def query_battery(self):\n \"\"\"\n Method to query and print the current battery percentage of the tello\n \"\"\"\n print(f\"Battery Life: {self.tello.query_battery()}%\")\n\n def update_frame(self):\n \"\"\"\n Method to update the live video feed from the tello (thread-based)\n \"\"\"\n while self.stream:\n try:\n # Get the current image frame from the video feed and display\n # in a popup window\n self.camera_frame = self.tello.get_frame_read()\n self.img = self.camera_frame.frame\n cv2.imshow(self.window_name, self.img)\n # 'waitKey' is necessary to properly display a cv2 popup window\n cv2.waitKey(1)\n except KeyboardInterrupt:\n break\n\n # Once we are no longer interested in streaming, land the tello and exit out of all windows\n # self.killSequence()\n\n def poll_keystrokes(self):\n \"\"\"\n Method to capture user input (for tello-based movements)\n \"\"\"\n command = input(\"Enter input: \")\n\n if command == \"kill\":\n print(f\"self kill\")\n self.killSequence()\n elif command == \"w\":\n # Move forward\n self.tello.send_rc_control(\n left_right_velocity=self.no_velocity,\n forward_backward_velocity=self.forward_velocity,\n up_down_velocity=self.no_velocity,\n yaw_velocity=self.no_velocity,\n )\n elif command == \"s\":\n # Move backward\n self.tello.send_rc_control(\n left_right_velocity=self.no_velocity,\n forward_backward_velocity=self.backward_velocity,\n up_down_velocity=self.no_velocity,\n 
yaw_velocity=self.no_velocity,\n            )\n        elif command == \"a\":\n            # Move left\n            self.tello.send_rc_control(\n                left_right_velocity=self.left_velocity,\n                forward_backward_velocity=self.no_velocity,\n                up_down_velocity=self.no_velocity,\n                yaw_velocity=self.no_velocity,\n            )\n        elif command == \"d\":\n            # Move right\n            self.tello.send_rc_control(\n                left_right_velocity=self.right_velocity,\n                forward_backward_velocity=self.no_velocity,\n                up_down_velocity=self.no_velocity,\n                yaw_velocity=self.no_velocity,\n            )\n        elif command == \"e\":\n            # Turn right\n            self.tello.send_rc_control(\n                left_right_velocity=self.no_velocity,\n                forward_backward_velocity=self.no_velocity,\n                up_down_velocity=self.no_velocity,\n                yaw_velocity=self.right_turn_velocity,\n            )\n        elif command == \"q\":\n            # Turn left\n            self.tello.send_rc_control(\n                left_right_velocity=self.no_velocity,\n                forward_backward_velocity=self.no_velocity,\n                up_down_velocity=self.no_velocity,\n                yaw_velocity=self.left_turn_velocity,\n            )\n        elif command == \"stop\":\n            # Stop all movement\n            self.tello.send_rc_control(\n                left_right_velocity=self.no_velocity,\n                forward_backward_velocity=self.no_velocity,\n                up_down_velocity=self.no_velocity,\n                yaw_velocity=self.no_velocity,\n            )\n        elif command == \"r\":\n            # Move up\n            self.tello.move_up(self.unit_dp)\n        elif command == \"f\":\n            # Move down\n            self.tello.move_down(self.unit_dp)\n        elif command == \"l\":\n            # Land\n            self.tello.land()\n            self.landed = True\n        elif (command == \"t\") and self.landed:\n            # Takeoff\n            self.tello.takeoff()\n            self.landed = False\n        elif command == \"diag\":\n            print(\"diag\")\n            self.diag()\n        else:\n            print(f\"command: {command}\")\n\n    def diag(self):\n        print(f\"stream: {self.stream}\")\n        print(f\"landed: {self.landed}\")\n        print(f\"main_loop: {self.main_loop}\")\n\n    def killSequence(self):\n        print(\"killing...\")\n\n        if self.main_loop:\n            self.main_loop = False\n\n        if self.stream:\n            self.tello.streamoff()\n            self.stream = False\n\n        if not self.landed:\n            self.tello.land()\n            self.landed = True\n\n        if self.popup:\n            cv2.destroyWindow(self.window_name)\n            cv2.destroyAllWindows()\n            self.popup = False\n\n\nif __name__ == \"__main__\":\n    # Start timing how long this script takes to run\n    start_time = time.time()\n\n    # Create a tello object to connect to the tello and initialize the video stream and get user input\n    tello_video_stream = VideoStreamTello()\n\n    # Poll for user input until the main loop ends (the video thread is already running)\n    while tello_video_stream.main_loop:\n        try:\n            tello_video_stream.poll_keystrokes()\n        except KeyboardInterrupt:\n            print(\"!!!Interrupted!!!\")\n            tello_video_stream.main_loop = False\n            tello_video_stream.killSequence()\n            tello_video_stream.video_stream_t.join()\n\n    # End timing how long this script takes to run\n    end_time = time.time()\n\n    # Print out how long this script took to run\n    print(f\"Total runtime: {end_time - start_time} seconds\")\n","repo_name":"BruceCoburn/ML4HST_drone","sub_path":"dev_code/playground/send_rc_control_test.py","file_name":"send_rc_control_test.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"11112080053","text":"\"\"\"\nConcrete object tests\n\"\"\"\nfrom acitoolkit import (\n    Node, Table\n)\nfrom acitoolkit.aciConcreteLib import (\n    ConcreteArp, ConcreteArpDomain, ConcreteArpEntry,\n    ConcreteVpc, ConcreteVpcIf,\n    ConcreteContext, ConcreteBD, ConcreteSVI,\n    ConcreteLoopback, ConcreteAccCtrlRule,\n    ConcreteFilter, ConcreteFilterEntry, ConcreteEp,\n    ConcretePortChannel, 
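# tunnel/overlay and CDP concrete classes\n    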
ConcreteTunnel, ConcreteOverlay,\n ConcreteCdp, ConcreteCdpIf, ConcreteCdpAdjEp)\nimport unittest\n\n\nclass TestConcreteArp(unittest.TestCase):\n \"\"\"\n Test the ConcreteArp class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteArp creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteArp = ConcreteArp(node)\n self.assertNotEqual(concreteArp, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteArp._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteArp._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-103/sys/arp/inst'\n self.assertEquals(ConcreteArp._get_name_from_dn(dn), '')\n\n def test_children_concrete_classes(self):\n \"\"\"\n Test ConcreteArp _get_children_concrete_classes returns something list-like\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteArp = ConcreteArp(node)\n self.assertTrue(\n isinstance(\n concreteArp._get_children_concrete_classes(),\n list))\n for child in concreteArp._get_children_concrete_classes():\n self.assertFalse(isinstance(child, ConcreteArpDomain))\n\n def test_eq(self):\n \"\"\"\n Test ConcreteArp __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteArp = ConcreteArp(node)\n node_id2 = '102'\n node2 = Node(node_id2)\n concreteArp2 = ConcreteArp(node2)\n self.assertEqual(concreteArp, concreteArp2)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteArp create table function\n \"\"\"\n node1 = Node('103')\n node2 = Node('102')\n concreteArp1 = ConcreteArp(node1)\n concreteArp2 = ConcreteArp(node2)\n concreteArps = [concreteArp1, concreteArp2]\n self.assertTrue(\n isinstance(\n ConcreteArp.get_table(concreteArps)[0],\n Table))\n\n\nclass TestConcreteArpDomain(unittest.TestCase):\n \"\"\"\n Test the ConcreteArpDomain class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteArpDomain creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteArp = ConcreteArp(node)\n concreteArpDomain = ConcreteArpDomain(concreteArp)\n self.assertNotEqual(concreteArpDomain, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteArpDomain._get_parent_class(), ConcreteArp)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteArpDomain._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-103/sys/arp/inst/dom-Tenant1:T1-CTX2'\n self.assertEquals(\n ConcreteArpDomain._get_name_from_dn(dn),\n 'Tenant1:T1-CTX2')\n\n def test_children_concrete_classes(self):\n \"\"\"\n Test ConcreteArpDomain _get_children_concrete_classes returns something list-like\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteArp = ConcreteArp(node)\n concreteArpDomain = ConcreteArpDomain(concreteArp)\n self.assertTrue(\n isinstance(\n concreteArpDomain._get_children_concrete_classes(),\n list))\n for child in concreteArpDomain._get_children_concrete_classes():\n self.assertFalse(isinstance(child, ConcreteArpEntry))\n\n\nclass TestConcreteArpEntry(unittest.TestCase):\n \"\"\"\n Test the ConcreteArpEntry class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteArpEntry creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteArp = ConcreteArp(node)\n concreteArpDomain = ConcreteArpDomain(concreteArp)\n concreteArpEntry = ConcreteArpEntry(concreteArpDomain)\n self.assertNotEqual(concreteArpEntry, 
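# the constructor above should yield a usable entry object\n                            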
None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(\n ConcreteArpEntry._get_parent_class(),\n ConcreteArpDomain)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteArpEntry._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-103/sys/arp/inst/dom-Tenant-SharedService:Shared_Service/db-ip/adj-[eth1/40.69]-[40.40.41.2]'\n self.assertEquals(ConcreteArpEntry._get_name_from_dn(dn), '[eth1')\n\n\nclass TestConcreteVpc(unittest.TestCase):\n \"\"\"\n Test the ConcreteVpc class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteVpc creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteVpc = ConcreteVpc(node)\n self.assertNotEqual(concreteVpc, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteVpc._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteVpc._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-103/sys/vpc'\n self.assertEquals(ConcreteVpc._get_name_from_dn(dn), '')\n\n def test_eq(self):\n \"\"\"\n Test ConcreteVpc __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteVpc = ConcreteVpc(node)\n node_id2 = '102'\n node2 = Node(node_id2)\n concreteVpc2 = ConcreteVpc(node2)\n self.assertEqual(concreteVpc, concreteVpc2)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteVpc create table function\n \"\"\"\n node1 = Node('103')\n node2 = Node('102')\n concreteVpc1 = ConcreteVpc(node1)\n concreteVpc2 = ConcreteVpc(node2)\n concreteVpc1.attr['admin_state'] = 'enabled'\n concreteVpc2.attr['admin_state'] = 'enabled'\n concreteVpc1.attr['dom_present'] = True\n concreteVpc2.attr['dom_present'] = True\n concreteVpcs = [concreteVpc1, concreteVpc2]\n self.assertTrue(\n isinstance(\n ConcreteVpc.get_table(concreteVpcs)[0],\n Table))\n\n\nclass TestConcreteVpcIf(unittest.TestCase):\n \"\"\"\n Test the ConcreteVpcIf class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteVpcIf creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteVpc = ConcreteVpc(node)\n concreteVpcIf = ConcreteVpcIf(concreteVpc)\n self.assertNotEqual(concreteVpcIf, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteVpcIf._get_parent_class(), ConcreteVpc)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteVpcIf create table function\n \"\"\"\n node1 = Node('103')\n node2 = Node('102')\n concreteVpc1 = ConcreteVpc(node1)\n concreteVpc2 = ConcreteVpc(node2)\n concreteVpcIf1 = ConcreteVpcIf(concreteVpc1)\n concreteVpcIf2 = ConcreteVpcIf(concreteVpc2)\n\n concreteVpcIfs = [concreteVpcIf1, concreteVpcIf2]\n self.assertTrue(\n isinstance(\n ConcreteVpcIf.get_table(concreteVpcIfs)[0],\n Table))\n\n\nclass TestConcreteContext(unittest.TestCase):\n \"\"\"\n Test the ConcreteContext class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteContext creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteContext = ConcreteContext(node)\n self.assertNotEqual(concreteContext, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteContext._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteContext._get_name_from_dn returns the name\n derived from the dn provided\n 
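(here, the bracketed vxlan segment at the end of the dn)\n        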
\"\"\"\n dn = 'topology/pod-1/node-202/sys/ctx-[vxlan-2129920]'\n self.assertEquals(\n ConcreteContext._get_name_from_dn(dn),\n '[vxlan-2129920]')\n\n def test_eq(self):\n \"\"\"\n Test ConcreteContext __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteContext = ConcreteContext(node)\n node_id2 = '102'\n node2 = Node(node_id2)\n concreteContext2 = ConcreteContext(node2)\n self.assertEqual(concreteContext, concreteContext2)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteContext create table function\n \"\"\"\n node1 = Node('103')\n node2 = Node('102')\n concreteContext1 = ConcreteContext(node1)\n concreteContext2 = ConcreteContext(node2)\n concreteContext1.attr['tenant'] = 'tenant1'\n concreteContext1.attr['context'] = 'context1'\n concreteContext2.attr['tenant'] = 'tenant2'\n concreteContext2.attr['context'] = 'context2'\n concreteContexts = [concreteContext1, concreteContext2]\n self.assertTrue(\n isinstance(\n ConcreteContext.get_table(concreteContexts)[0],\n Table))\n\n\nclass TestConcreteSVI(unittest.TestCase):\n \"\"\"\n Test the ConcreteSVI class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteSVI creation\n \"\"\"\n node_id = '102'\n node = Node(node_id)\n concreteBd = ConcreteBD(node)\n concreteSVI = ConcreteSVI(concreteBd)\n self.assertNotEqual(concreteSVI, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteSVI._get_parent_class(), ConcreteBD)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteSVI._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-102/sys/ctx-[vxlan-2293760]/bd-[vxlan-14811120]/svi-[vlan14]'\n self.assertEquals(ConcreteSVI._get_name_from_dn(dn), '[vlan14]')\n\n def test_eq(self):\n \"\"\"\n Test ConcreteSVI __eq__ function\n \"\"\"\n node_id = '102'\n node = Node(node_id)\n concreteBd = ConcreteBD(node)\n concreteSVI = ConcreteSVI(concreteBd)\n node_id = '102'\n node = Node(node_id)\n concreteBd1 = ConcreteBD(node)\n concreteSVI1 = ConcreteSVI(concreteBd1)\n self.assertEqual(concreteSVI, concreteSVI1)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteSVI create table function\n \"\"\"\n node_id = '102'\n node = Node(node_id)\n concreteBd = ConcreteBD(node)\n concreteSVI = ConcreteSVI(concreteBd)\n node_id = '102'\n node = Node(node_id)\n concreteBd1 = ConcreteBD(node)\n concreteSVI1 = ConcreteSVI(concreteBd1)\n concreteSVIs = [concreteSVI, concreteSVI1]\n self.assertTrue(\n isinstance(\n ConcreteSVI.get_table(concreteSVIs)[0],\n Table))\n\n\nclass TestConcreteLoopback(unittest.TestCase):\n \"\"\"\n Test the ConcreteLoopback class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteLoopback creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteLoopback = ConcreteLoopback(node)\n self.assertNotEqual(concreteLoopback, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteLoopback._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteLoopback._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-103/sys/ctx-[vxlan-2916352]/lb-[lo5]'\n self.assertEquals(ConcreteLoopback._get_name_from_dn(dn), '[lo5]')\n\n def test_eq(self):\n \"\"\"\n Test ConcreteLoopback __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteLoopback = ConcreteLoopback(node)\n node_id2 = '102'\n node2 = 
Node(node_id2)\n concreteLoopback2 = ConcreteLoopback(node2)\n self.assertEqual(concreteLoopback, concreteLoopback2)\n\n\nclass TestConcreteBD(unittest.TestCase):\n \"\"\"\n Test the ConcreteBD class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteBD creation\n \"\"\"\n node_id = '101'\n node = Node(node_id)\n concreteBD = ConcreteBD(node)\n self.assertNotEqual(concreteBD, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteBD._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteBD._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-101/sys/ctx-[vxlan-2686976]/bd-[vxlan-15794151]'\n self.assertEquals(\n ConcreteBD._get_name_from_dn(dn),\n 'ctx-[vxlan-2686976]')\n\n def test_children_concrete_classes(self):\n \"\"\"\n Test ConcreteBD _get_children_concrete_classes returns something list-like\n \"\"\"\n node_id = '101'\n node = Node(node_id)\n concreteBD = ConcreteBD(node)\n self.assertTrue(\n isinstance(\n concreteBD._get_children_concrete_classes(),\n list))\n for child in concreteBD._get_children_concrete_classes():\n self.assertFalse(isinstance(child, ConcreteSVI))\n\n def test_eq(self):\n \"\"\"\n Test ConcreteBD __eq__ function\n \"\"\"\n node_id = '101'\n node = Node(node_id)\n concreteBD = ConcreteBD(node)\n node_id2 = '101'\n node2 = Node(node_id2)\n concreteBD2 = ConcreteBD(node2)\n self.assertEqual(concreteBD, concreteBD2)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteBD create table function\n \"\"\"\n node_id = '102'\n node = Node(node_id)\n concreteBd = ConcreteBD(node)\n node_id = '102'\n node = Node(node_id)\n concreteBd1 = ConcreteBD(node)\n concreteBds = [concreteBd, concreteBd1]\n self.assertTrue(\n isinstance(\n ConcreteBD.get_table(concreteBds)[0],\n Table))\n\n\nclass TestConcreteAccCtrlRule(unittest.TestCase):\n \"\"\"\n Test the ConcreteAccCtrlRule class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteAccCtrlRule creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteAccCtrlRule = ConcreteAccCtrlRule(node)\n self.assertNotEqual(concreteAccCtrlRule, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteAccCtrlRule._get_parent_class(), Node)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteAccCtrlRule create table function\n \"\"\"\n node_id1 = '102'\n node1 = Node(node_id1)\n concreteAccCtrlRule1 = ConcreteAccCtrlRule(node1)\n node_id2 = '102'\n node2 = Node(node_id2)\n concreteAccCtrlRule2 = ConcreteAccCtrlRule(node2)\n concreteAccCtrlRules = [concreteAccCtrlRule1, concreteAccCtrlRule2]\n self.assertTrue(\n isinstance(\n ConcreteAccCtrlRule.get_table(concreteAccCtrlRules)[0],\n Table))\n\n\nclass TestConcreteFilter(unittest.TestCase):\n \"\"\"\n Test the ConcreteFilter class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteFilter creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteFilter = ConcreteFilter(node)\n self.assertNotEqual(concreteFilter, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteFilter._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteFilter._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-102/sys/actrl/filt-implicit'\n 
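# the filter name is the token after the 'filt-' prefix\n        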
self.assertEquals(ConcreteFilter._get_name_from_dn(dn), 'implicit')\n\n def test_eq(self):\n \"\"\"\n Test ConcreteFilter __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteFilter = ConcreteFilter(node)\n node_id2 = '102'\n node2 = Node(node_id2)\n concreteFilter2 = ConcreteFilter(node2)\n self.assertEqual(concreteFilter, concreteFilter2)\n\n\nclass TestConcreteFilterEntry(unittest.TestCase):\n \"\"\"\n Test the ConcreteFilterEntry class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteFilterEntry creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteFilter = ConcreteFilter(node)\n concreteFilterEntry = ConcreteFilterEntry(concreteFilter)\n self.assertNotEqual(concreteFilterEntry, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(\n ConcreteFilterEntry._get_parent_class(),\n ConcreteFilter)\n\n def test_eq(self):\n \"\"\"\n Test ConcreteFilterEntry __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteFilter = ConcreteFilter(node)\n concreteFilterEntry = ConcreteFilterEntry(concreteFilter)\n node_id2 = '102'\n node2 = Node(node_id2)\n concreteFilter2 = ConcreteFilter(node2)\n concreteFilterEntry2 = ConcreteFilterEntry(concreteFilter2)\n self.assertEqual(concreteFilterEntry, concreteFilterEntry2)\n\n\nclass TestConcreteEp(unittest.TestCase):\n \"\"\"\n Test the ConcreteEp class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteEp creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteEp = ConcreteEp(node)\n self.assertNotEqual(concreteEp, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteEp._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteEp._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-103/sys/ctx-[vxlan-2293760]/bd-[vxlan-15597456]/db-ep/ip-[100.100.101.1]'\n self.assertEquals(ConcreteEp._get_name_from_dn(dn), '[vxlan-2293760]')\n\n def test_eq(self):\n \"\"\"\n Test ConcreteEp __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteEp = ConcreteEp(node)\n node_id2 = '102'\n node2 = Node(node_id2)\n concreteEp2 = ConcreteEp(node2)\n self.assertEqual(concreteEp, concreteEp2)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteEp create table function\n \"\"\"\n node1 = Node('103')\n node2 = Node('102')\n concreteEp1 = ConcreteEp(node1)\n concreteEp2 = ConcreteEp(node2)\n concreteEp1.attr['tenant'] = 'tenant1'\n concreteEp1.attr['context'] = 'context1'\n concreteEp1.attr['bridge_domain'] = 'bridge_domain1'\n concreteEp2.attr['tenant'] = 'tenant2'\n concreteEp2.attr['context'] = 'context2'\n concreteEp2.attr['bridge_domain'] = 'bridge_domain2'\n concreteEps = [concreteEp1, concreteEp2]\n self.assertTrue(\n isinstance(\n ConcreteEp.get_table(concreteEps)[0],\n Table))\n\n\nclass TestConcretePortChannel(unittest.TestCase):\n \"\"\"\n Test the ConcretePortChannel class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcretePortChannel creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concretePortChannel = ConcretePortChannel(node)\n self.assertNotEqual(concretePortChannel, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcretePortChannel._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that 
ConcretePortChannel._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-101/sys/aggr-[po3]'\n self.assertEquals(ConcretePortChannel._get_name_from_dn(dn), '[po3]')\n\n def test_eq(self):\n \"\"\"\n Test ConcretePortChannel __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concretePortChannel = ConcretePortChannel(node)\n node_id2 = '102'\n node2 = Node(node_id2)\n concretePortChannel2 = ConcretePortChannel(node2)\n self.assertEqual(concretePortChannel, concretePortChannel2)\n\n def test_get_table(self):\n \"\"\"\n Test ConcretePortChannel create table function\n \"\"\"\n node1 = Node('103')\n node2 = Node('102')\n concretePortChannel1 = ConcretePortChannel(node1)\n concretePortChannel2 = ConcretePortChannel(node2)\n concretePortChannel1.attr['id'] = '1'\n concretePortChannel2.attr['id'] = '2'\n concretePortChannels = [concretePortChannel1, concretePortChannel2]\n self.assertTrue(\n isinstance(\n ConcretePortChannel.get_table(concretePortChannels)[0],\n Table))\n\n\nclass TestConcreteTunnel(unittest.TestCase):\n \"\"\"\n Test the ConcreteTunnel class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteTunnel creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteOverlay = ConcreteOverlay(node)\n concreteTunnel = ConcreteTunnel(concreteOverlay)\n self.assertNotEqual(concreteTunnel, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteTunnel._get_parent_class(), ConcreteOverlay)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteTunnel._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-201/sys/tunnel-[tunnel1]'\n self.assertEquals(ConcreteTunnel._get_name_from_dn(dn), '[tunnel1]')\n\n def test_children_concrete_classes(self):\n \"\"\"\n Test ConcreteTunnel _get_children_concrete_classes returns something list-like\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteOverlay = ConcreteOverlay(node)\n concreteTunnel = ConcreteTunnel(concreteOverlay)\n self.assertTrue(\n isinstance(\n concreteTunnel._get_children_concrete_classes(),\n list))\n self.assertTrue(\n len(concreteTunnel._get_children_concrete_classes()) == 0)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteTunnel create table function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteOverlay = ConcreteOverlay(node)\n concreteTunnel1 = ConcreteTunnel(concreteOverlay)\n node_id = '103'\n node = Node(node_id)\n concreteOverlay = ConcreteOverlay(node)\n concreteTunnel2 = ConcreteTunnel(concreteOverlay)\n concreteTunnels = [concreteTunnel1, concreteTunnel2]\n self.assertTrue(\n isinstance(\n ConcreteTunnel.get_table(concreteTunnels)[0],\n Table))\n\n\nclass TestConcreteOverlay(unittest.TestCase):\n \"\"\"\n Test the ConcreteOverlay class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteOverlay creation\n \"\"\"\n node_id = '201'\n node = Node(node_id)\n concreteOverlay = ConcreteOverlay(node)\n self.assertNotEqual(concreteOverlay, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteOverlay._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteOverlay._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-201/overlay'\n self.assertEquals(ConcreteOverlay._get_name_from_dn(dn), '')\n\n def 
test_children_concrete_classes(self):\n \"\"\"\n Test ConcreteOverlay _get_children_concrete_classes returns something list-like\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteOverlay = ConcreteOverlay(node)\n self.assertTrue(\n isinstance(\n concreteOverlay._get_children_concrete_classes(),\n list))\n for child in concreteOverlay._get_children_concrete_classes():\n self.assertFalse(isinstance(child, ConcreteTunnel))\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteOverlay create table function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteOverlay1 = ConcreteOverlay(node)\n node_id = '103'\n node = Node(node_id)\n concreteOverlay2 = ConcreteOverlay(node)\n concreteOverlays = [concreteOverlay1, concreteOverlay2]\n self.assertTrue(\n isinstance(\n ConcreteOverlay.get_table(concreteOverlays)[0],\n Table))\n\n\nclass TestConcreteCdp(unittest.TestCase):\n \"\"\"\n Test the ConcreteCdp class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteCdp creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteCdp = ConcreteCdp(node)\n self.assertNotEqual(concreteCdp, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteCdp._get_parent_class(), Node)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteCdp._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-103/sys/cdp/inst'\n self.assertEquals(ConcreteCdp._get_name_from_dn(dn), '')\n\n def test_children_concrete_classes(self):\n \"\"\"\n Test ConcreteCdp _get_children_concrete_classes returns something list-like\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteCdp = ConcreteCdp(node)\n self.assertTrue(\n isinstance(\n concreteCdp._get_children_concrete_classes(),\n list))\n for child in concreteCdp._get_children_concrete_classes():\n self.assertFalse(isinstance(child, ConcreteCdpIf))\n\n def test_eq(self):\n \"\"\"\n Test ConcreteCdp __eq__ function\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteCdp = ConcreteCdp(node)\n node_id2 = '102'\n node2 = Node(node_id2)\n concreteCdp2 = ConcreteCdp(node2)\n self.assertEqual(concreteCdp, concreteCdp2)\n\n def test_get_table(self):\n \"\"\"\n Test ConcreteCdp create table function\n \"\"\"\n node_id1 = '103'\n node1 = Node(node_id1)\n concreteCdp1 = ConcreteCdp(node1)\n concreteCdpIf1 = ConcreteCdpIf(concreteCdp1)\n concreteCdp1.add_child(concreteCdpIf1)\n node_id2 = '103'\n node2 = Node(node_id2)\n concreteCdp2 = ConcreteCdp(node2)\n concreteCdpIf2 = ConcreteCdpIf(concreteCdp2)\n concreteCdp2.add_child(concreteCdpIf2)\n concreteCdps = [concreteCdp1, concreteCdp2]\n self.assertTrue(\n isinstance(\n ConcreteCdp.get_table(concreteCdps)[0],\n Table))\n\n\nclass TestConcreteCdpIf(unittest.TestCase):\n \"\"\"\n Test the ConcreteCdpIf class\n \"\"\"\n\n def test_create(self):\n \"\"\"\n ConcreteCdpIf creation\n \"\"\"\n node_id = '103'\n node = Node(node_id)\n concreteCdp = ConcreteCdp(node)\n concreteCdpIf = ConcreteCdpIf(concreteCdp)\n self.assertNotEqual(concreteCdpIf, None)\n\n def test_get_parent_class(self):\n \"\"\"\n Ensure class has the correct parent class\n \"\"\"\n self.assertEquals(ConcreteCdpIf._get_parent_class(), ConcreteCdp)\n\n def test_get_name_from_dn(self):\n \"\"\"\n Test that ConcreteCdpIf._get_name_from_dn returns the name\n derived from the dn provided\n \"\"\"\n dn = 'topology/pod-1/node-103/sys/cdp/inst/if-[eth1/17]'\n self.assertEquals(ConcreteCdpIf._get_name_from_dn(dn), 
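# note: the extracted name is cut at the '/' inside '[eth1/17]'\n                          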
'[eth1')\n\n    def test_children_concrete_classes(self):\n        \"\"\"\n        Test ConcreteCdpIf _get_children_concrete_classes returns something list-like\n        \"\"\"\n        node_id = '103'\n        node = Node(node_id)\n        concreteCdp = ConcreteCdp(node)\n        concreteCdpIf = ConcreteCdpIf(concreteCdp)\n        self.assertTrue(\n            isinstance(\n                concreteCdpIf._get_children_concrete_classes(),\n                list))\n        for child in concreteCdpIf._get_children_concrete_classes():\n            self.assertFalse(isinstance(child, ConcreteCdpAdjEp))\n\n\nclass TestConcreteCdpAdjEp(unittest.TestCase):\n    \"\"\"\n    Test the ConcreteCdpAdjEp class\n    \"\"\"\n\n    def test_create(self):\n        \"\"\"\n        ConcreteCdpAdjEp creation\n        \"\"\"\n        node_id = '103'\n        node = Node(node_id)\n        concreteCdp = ConcreteCdp(node)\n        concreteCdpIf = ConcreteCdpIf(concreteCdp)\n        concreteCdpAdjEp = ConcreteCdpAdjEp(concreteCdpIf)\n        self.assertNotEqual(concreteCdpAdjEp, None)\n\n    def test_get_parent_class(self):\n        \"\"\"\n        Ensure class has the correct parent class\n        \"\"\"\n        self.assertEquals(ConcreteCdpAdjEp._get_parent_class(), ConcreteCdpIf)\n\n    def test_get_name_from_dn(self):\n        \"\"\"\n        Test that ConcreteCdpAdjEp._get_name_from_dn returns the name\n        derived from the dn provided\n        \"\"\"\n        dn = 'topology/pod-1/node-103/sys/cdp/inst/if-[eth1/17]/adj-1'\n        self.assertEquals(ConcreteCdpAdjEp._get_name_from_dn(dn), '1')\n\n    def test_children_concrete_classes(self):\n        \"\"\"\n        Test ConcreteCdpAdjEp _get_children_concrete_classes returns something list-like\n        \"\"\"\n        node_id = '103'\n        node = Node(node_id)\n        concreteCdp = ConcreteCdp(node)\n        concreteCdpIf = ConcreteCdpIf(concreteCdp)\n        concreteCdpAdjEp = ConcreteCdpAdjEp(concreteCdpIf)\n        self.assertTrue(\n            isinstance(\n                concreteCdpAdjEp._get_children_concrete_classes(),\n                list))\n        self.assertTrue(\n            len(concreteCdpAdjEp._get_children_concrete_classes()) == 0)\n\nif __name__ == '__main__':\n    offline = unittest.TestSuite()\n    offline.addTest(unittest.makeSuite(TestConcreteArp))\n    offline.addTest(unittest.makeSuite(TestConcreteArpDomain))\n    unittest.main()\n","repo_name":"datacenter/acitoolkit","sub_path":"tests/aciConcreteLib_test.py","file_name":"aciConcreteLib_test.py","file_ext":"py","file_size_in_byte":31472,"program_lang":"python","lang":"en","doc_type":"code","stars":341,"dataset":"github-code","pt":"81"}{"seq_id":"72537772425","text":"from gpiozero import MotionSensor\n\n# PIR data pin to BCM#21, +5v for power\npir = MotionSensor(21, sample_rate=1)\n# Best to turn PIR's SENSITIVITY DOWN and the DELAY UP\n\ndef flag1():\n    flag = open('/path/to/your/flag', 'w')\n    flag.write('1\\n')\n    flag.close()\n\ndef flag0():\n    flag = open('/path/to/your/flag', 'w')\n    flag.write('0\\n')\n    flag.close()\n\nwhile True:\n    pir.wait_for_motion()\n    flag1()\n    pir.wait_for_no_motion()\n    flag0()\n","repo_name":"bclittleua/discobots","sub_path":"physical_status/desk_alert.py","file_name":"desk_alert.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"18367066625","text":"from django import forms\nfrom .models import Comment\n\n\nclass CommentForm(forms.ModelForm):\n    author = forms.CharField(\n        label='Author', max_length=100, required=True,\n        widget=forms.TextInput(attrs={'class': 'form-control'})\n    )\n    content = forms.CharField(\n        label='Content', required=True,\n        widget=forms.Textarea(attrs={'class': 'form-control'})\n    )\n    class Meta:\n        model = Comment\n        fields = ['author', 
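# expose only the user-editable fields on the form\n                  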
'content']\n","repo_name":"yeonghan/yozora","sub_path":"first_django/blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"594034794","text":"import os\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, TypeVar, Generic, Sequence, Type, List\n\nimport papis.config\nimport papis.document\nimport papis.format\nimport papis.plugin\nimport papis.logging\n\nlogger = papis.logging.get_logger(__name__)\n\n#: Name of the entry points for :class:`Picker` plugins.\nPICKER_EXTENSION_NAME = \"papis.picker\"\n\n#: Invariant :class:`~typing.TypeVar` with no bounds.\nT = TypeVar(\"T\")\n\n\nclass Picker(ABC, Generic[T]):\n    \"\"\"An interface used to select items from a list.\n\n    .. automethod:: __call__\n    \"\"\"\n\n    @abstractmethod\n    def __call__(\n            self,\n            items: Sequence[T],\n            header_filter: Callable[[T], str],\n            match_filter: Callable[[T], str],\n            default_index: int = 0\n    ) -> List[T]:\n        \"\"\"\n        :arg items: a sequence of items from which to pick a subset.\n        :arg header_filter: a callable that takes an item from *items*\n            and returns a string representation shown to the user.\n        :arg match_filter: a callable that takes an item from *items*\n            and returns a string representation that is used when searching or\n            filtering the items.\n        :arg default_index: (optional) sets the selected item when the picker\n            is first shown to the user.\n\n        :returns: a subset of *items* that were picked.\n        \"\"\"\n\n\ndef get_picker(name: str) -> Type[Picker[Any]]:\n    \"\"\"Get a picker by its plugin name.\n\n    :arg name: the name of an entrypoint to load a :class:`Picker` plugin from.\n    :returns: a :class:`Picker` subclass implemented in the plugin.\n    \"\"\"\n    picker: Type[Picker[Any]] = (\n        papis.plugin.get_extension_manager(PICKER_EXTENSION_NAME)[name].plugin\n    )\n\n    return picker\n\n\ndef pick(items: Sequence[T],\n         header_filter: Callable[[T], str] = str,\n         match_filter: Callable[[T], str] = str,\n         default_index: int = 0) -> List[T]:\n    \"\"\"Load a :class:`Picker` plugin and select a subset of *items*.\n\n    The arguments to this function match those of :meth:`Picker.__call__`. The\n    specific picker is chosen through the :ref:`config-settings-picktool`\n    configuration option.\n\n    :returns: a subset of *items* that were picked.\n    \"\"\"\n\n    name = papis.config.getstring(\"picktool\")\n    try:\n        picker: Type[Picker[T]] = get_picker(name)\n    except KeyError:\n        entrypoints = papis.plugin.get_available_entrypoints(PICKER_EXTENSION_NAME)\n        logger.error(\n            \"Invalid picker: '%s'. Registered pickers are '%s'.\",\n            name, \"', '\".join(entrypoints))\n        return []\n    else:\n        return picker()(items,\n                        header_filter,\n                        match_filter,\n                        default_index)\n\n\ndef pick_doc(\n        documents: Sequence[papis.document.Document]\n        ) -> List[papis.document.Document]:\n    \"\"\"Pick from a sequence of *documents* using :func:`pick`.\n\n    This function uses the :ref:`config-settings-header-format-file` setting\n    or, if not available, the :ref:`config-settings-header-format` setting\n    to construct a *header_filter* for the picker. 
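The format string is\n    rendered for each document via :func:`papis.format.format`.\n    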
It also uses the configuration\n    setting :ref:`config-settings-match-format` to construct a *match_filter*.\n\n    :arg documents: a sequence of documents.\n    :returns: a subset of *documents* that was picked.\n    \"\"\"\n\n    header_format_path = papis.config.get(\"header-format-file\")\n    if header_format_path is not None:\n        with open(os.path.expanduser(header_format_path)) as fd:\n            header_format = fd.read().rstrip()\n    else:\n        header_format = papis.config.getstring(\"header-format\")\n    match_format = papis.config.getstring(\"match-format\")\n\n    from functools import partial\n\n    header_filter = partial(papis.format.format, header_format)\n    match_filter = partial(papis.format.format, match_format)\n\n    return pick(documents,\n                header_filter=header_filter,\n                match_filter=match_filter)\n\n\ndef pick_subfolder_from_lib(lib: str) -> List[str]:\n    \"\"\"Pick subfolders from all existing subfolders in *lib*.\n\n    Note that this includes document folders in *lib* as well as nested library\n    folders.\n\n    :arg lib: the name of an existing library to search in.\n    :returns: a subset of the subfolders in the library.\n    \"\"\"\n    import papis.api\n    documents = papis.api.get_all_documents_in_lib(lib)\n\n    # get all document directories\n    folders = [os.path.dirname(str(d.get_main_folder())) for d in documents]\n    # get all library directories (extend, not append: append(*dirs) fails\n    # with more than one configured library directory)\n    folders.extend(papis.config.get_lib_dirs())\n    # remove duplicates and sort paths\n    folders = sorted(set(folders))\n\n    return pick(folders)\n","repo_name":"papis/papis","sub_path":"papis/pick.py","file_name":"pick.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":1237,"dataset":"github-code","pt":"81"}{"seq_id":"32853477896","text":"#--------------------Exercise 1--------------------\r\n\r\nimport random\r\n\r\ndef experiment(path, time):\r\n    if path == 1:\r\n        chosen_path = random.randint(1, 3)\r\n        time += 3\r\n        return experiment(chosen_path, time)\r\n    elif path == 2:\r\n        chosen_path = random.randint(1, 3)\r\n        time += 5\r\n        return experiment(chosen_path, time)\r\n    elif path == 3:\r\n        time += 7\r\n        return time\r\n\r\nchosen_path = random.randint(1, 3)\r\ntime = 0\r\n#print(chosen_path)\r\nprint(f\"The rat took {experiment(chosen_path, time)} minutes to get out of the cage\")\r\n\r\n#--------------------Exercise 2--------------------\r\n\r\n#Appropriate task statement for the function presented:\r\n'''Create a recursive function called \"f\" that takes a positive integer (\"n\") as input and returns its reversed version'''","repo_name":"Joako64110/TrabajosS1-ProgramacionI-Comision4","sub_path":"Ejercicio de Recursión/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"36617982254","text":"\nclass książkaAdresowa():\n    def __init__(self, imię, nazwisko, firma, email):\n        self.imię = imię\n        self.nazwisko = nazwisko\n        self.firma = firma\n        self.email = email\n\nPawlak = książkaAdresowa(imię = \"Halina\", nazwisko = \"Pawlak\", firma = \"Żabka\", email = \"hp01@gmail.com\")\nKwiatkowski = książkaAdresowa(imię = \"Janusz\", nazwisko=\"Kwiatkowski\", firma = \"Auchan\", email = \"j.kwiat@gmail.com\")\nNowak = książkaAdresowa(imię = \"Anna\", nazwisko= \"Nowak\", firma = \"Centrum\", email = \"anna.nowak@wp.pl\")\nWojciechowski = książkaAdresowa(imię = \"Rajmund\", nazwisko = \"Wojciechowski\", firma = \"Tesco\", email = \"rajwoj@wp.pl\")\nPiotrowski = książkaAdresowa(imię = \"Mariusz\", 
nazwisko=\"Piotrowski\", firma = \"Fresh\", email = \"piotrkowski.m@gmail.com\")\n\nosoby = [Pawlak, Kwiatkowski, Nowak, Wojciechowski, Piotrowski]\nprint(Pawlak.imię, Pawlak.nazwisko, Pawlak.email)\n","repo_name":"dorota-jagodzka/movie_library","sub_path":"książka_adresowa.py","file_name":"książka_adresowa.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}